/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
#include <drm/drm_atomic_helper.h>

/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage.  This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available on Intel GPUs, which differ in
 * the latency required to enter and leave RC6 and in the voltage consumed
 * by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter: RC6 is the normal RC6 state, RC6p is deep RC6, and
 * RC6pp is the deepest RC6. Their support by hardware varies according to
 * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
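
/*
 * Illustrative note (added; not from the original file): assuming the
 * RC6 control flags defined in i915_reg.h, an "RC6 only" configuration
 * sets just GEN6_RC_CTL_RC6_ENABLE in the RC control mask, while a
 * hypothetical deeper configuration could also OR in
 * GEN6_RC_CTL_RC6p_ENABLE and/or GEN6_RC_CTL_RC6pp_ENABLE on hardware
 * that supports those states.
 */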

static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
	if (HAS_LLC(dev_priv)) {
		/*
		 * WaCompressedResourceDisplayNewHashMode:skl,kbl
		 * Display WA #0390: skl,kbl
		 *
		 * Must match Sampler, Pixel Back End, and Media. See
		 * WaCompressedResourceSamplerPbeMediaNewHashMode.
		 */
		I915_WRITE(CHICKEN_PAR1_1,
			   I915_READ(CHICKEN_PAR1_1) |
			   SKL_DE_COMPRESSED_HASH_MODE);
	}

	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	/* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl,cfl */
	/* WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS |
		   DISP_FBC_MEMORY_WAKE);

	/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl,cfl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);

	if (IS_SKYLAKE(dev_priv)) {
		/* WaDisableDopClockGating */
		I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL)
			   & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	}
}

static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);
}

static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/*
	 * WaDisablePWMClockGating:glk
	 * Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);

	/* WaDDIIOTimeout:glk */
	if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1)) {
		u32 val = I915_READ(CHICKEN_MISC_2);
		val &= ~(GLK_CL0_PWR_DOWN |
			 GLK_CL1_PWR_DOWN |
			 GLK_CL2_PWR_DOWN);
		I915_WRITE(CHICKEN_MISC_2, val);
	}

}

static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq =  667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}

static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
							 bool is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
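
/*
 * Example lookup (illustrative; assuming the usual cxsr_latency field
 * order of is_desktop, is_ddr3, fsb_freq, mem_freq followed by the
 * latencies): a desktop part with DDR2, an 800 MHz FSB and 667 MHz
 * memory matches the {1, 0, 800, 667, 3354, ...} entry above, i.e. a
 * display self-refresh latency of 3354 ns.
 */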

static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->pcu_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->pcu_lock);
}

static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->pcu_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->pcu_lock);
}

#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
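
/*
 * Expansion example (added for illustration): FW_WM(wm, SR) expands to
 * (((wm) << DSPFW_SR_SHIFT) & DSPFW_SR_MASK), i.e. the watermark value
 * shifted into the SR field of a DSPFW register and masked to that
 * field's width.
 */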

static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool was_enabled;
	u32 val;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
	} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev_priv)) {
		val = I915_READ(DSPFW3);
		was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
		if (enable)
			val |= PINEVIEW_SELF_REFRESH_EN;
		else
			val &= ~PINEVIEW_SELF_REFRESH_EN;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev_priv)) {
		/*
		 * FIXME can't find a bit like this for 915G, and
		 * yet it does have the related watermark in
		 * FW_BLC_SELF. What's going on?
		 */
		was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return false;
	}

	trace_intel_memory_cxsr(dev_priv, was_enabled, enable);

	DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n",
		      enableddisabled(enable),
		      enableddisabled(was_enabled));

	return was_enabled;
}

/**
 * intel_set_memory_cxsr - Configure CxSR state
 * @dev_priv: i915 device
 * @enable: Allow vs. disallow CxSR
 *
 * Allow or disallow the system to enter a special CxSR
 * (C-state self refresh) state. What typically happens in CxSR mode
 * is that several display FIFOs may get combined into a single larger
 * FIFO for a particular plane (so called max FIFO mode) to allow the
 * system to defer memory fetches longer, and the memory will enter
 * self refresh.
 *
 * Note that enabling CxSR does not guarantee that the system enters
 * this special mode, nor does it guarantee that the system stays
 * in that mode once entered. So this just allows/disallows the system
 * to autonomously utilize the CxSR mode. Other factors such as core
 * C-states will affect when/if the system actually enters/exits the
 * CxSR mode.
 *
 * Note that on VLV/CHV this actually only controls the max FIFO mode,
 * and the system is free to enter/exit memory self refresh at any time
 * even when the use of CxSR has been disallowed.
 *
 * While the system is actually in the CxSR/max FIFO mode, some plane
 * control registers will not get latched on vblank. Thus in order to
 * guarantee the system will respond to changes in the plane registers
 * we must always disallow CxSR prior to making changes to those registers.
 * Unfortunately the system will re-evaluate the CxSR conditions at
 * frame start which happens after vblank start (which is when the plane
 * registers would get latched), so we can't proceed with the plane update
 * during the same frame where we disallowed CxSR.
 *
 * Certain platforms also have a deeper HPLL SR mode. Fortunately the
 * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
 * the hardware w.r.t. HPLL SR when writing to plane registers.
 * Disallowing just CxSR is sufficient.
 */
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool ret;

	mutex_lock(&dev_priv->wm.wm_mutex);
	ret = _intel_set_memory_cxsr(dev_priv, enable);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->wm.vlv.cxsr = enable;
	else if (IS_G4X(dev_priv))
		dev_priv->wm.g4x.cxsr = enable;
	mutex_unlock(&dev_priv->wm.wm_mutex);

	return ret;
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;

#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
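
/*
 * Example (matches the PIPE_A case below): with lo_shift = 0 and
 * hi_shift = 0 the 9-bit sprite0 FIFO start position is bits 7:0 of
 * DSPARB, with bit 8 supplied by bit 0 of DSPARB2.
 */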

static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	enum pipe pipe = crtc->pipe;
	int sprite0_start, sprite1_start;

	switch (pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		MISSING_CASE(pipe);
		return;
	}

	fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
	fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
	fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
	fifo_state->plane[PLANE_CURSOR] = 63;
}

static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
			      enum i9xx_plane_id i9xx_plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (i9xx_plane == PLANE_B)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
		      dsparb, plane_name(i9xx_plane), size);

	return size;
}
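
/*
 * Worked example (hypothetical register value): if DSPARB holds 0x30
 * in its low 7 bits and 0x60 in the C-start field, plane A owns
 * 0x30 = 48 FIFO entries and plane B owns 0x60 - 0x30 = 48.
 */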

static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
			      enum i9xx_plane_id i9xx_plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (i9xx_plane == PLANE_B)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
		      dsparb, plane_name(i9xx_plane), size);

	return size;
}

static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
			      enum i9xx_plane_id i9xx_plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %c: %d\n",
		      dsparb, plane_name(i9xx_plane), size);

	return size;
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

/**
 * intel_wm_method1 - Method 1 / "small buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 1 or "small buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the short term drain rate
 * of the FIFO, ie. it does not account for blanking periods
 * which would effectively reduce the average drain rate across
 * a longer period. The name "small" refers to the fact the
 * FIFO is relatively small compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *   |\   |\
 *   | \  | \
 * __---__---__ (- plane active, _ blanking)
 * -> time
 *
 * or perhaps like this:
 *
 *   |\|\  |\|\
 * __----__----__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method1(unsigned int pixel_rate,
				     unsigned int cpp,
				     unsigned int latency)
{
	uint64_t ret;

	ret = (uint64_t) pixel_rate * cpp * latency;
	ret = DIV_ROUND_UP_ULL(ret, 10000);

	return ret;
}
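
/*
 * Worked example (illustrative numbers only): a 148500 kHz pixel clock
 * with 4 bytes per pixel and a 5 usec wakeup latency (50 in 0.1us
 * units) gives
 *
 *   148500 * 4 * 50 / 10000 = 2970 bytes
 *
 * i.e. the FIFO must buffer roughly 3 KiB to ride out the memory
 * wakeup time at the instantaneous drain rate.
 */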

/**
 * intel_wm_method2 - Method 2 / "large buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @htotal: Pipe horizontal total
 * @width: Plane width in pixels
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 2 or "large buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the long term drain rate
 * of the FIFO, ie. it does account for blanking periods
 * which effectively reduce the average drain rate across
 * a longer period. The name "large" refers to the fact the
 * FIFO is relatively large compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *    |\___       |\___
 *    |    \___   |    \___
 *    |        \  |        \
 * __ --__--__--__--__--__--__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method2(unsigned int pixel_rate,
				     unsigned int htotal,
				     unsigned int width,
				     unsigned int cpp,
				     unsigned int latency)
{
	unsigned int ret;

	/*
	 * FIXME remove once all users are computing
	 * watermarks in the correct place.
	 */
	if (WARN_ON_ONCE(htotal == 0))
		htotal = 1;

	ret = (latency * pixel_rate) / (htotal * 10000);
	ret = (ret + 1) * width * cpp;

	return ret;
}
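
/*
 * Worked example (illustrative numbers only): with a 148500 kHz pixel
 * clock, htotal = 2200, a 1920 pixel wide 4 bpp plane and a 35 usec
 * latency (350 in 0.1us units):
 *
 *   lines = (350 * 148500) / (2200 * 10000) = 2
 *   bytes = (2 + 1) * 1920 * 4 = 23040
 *
 * so roughly three lines worth of data must be buffered to cover the
 * wakeup latency once blanking periods are accounted for.
 */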

/**
 * intel_calculate_wm - calculate watermark level
 * @pixel_rate: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size.  When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills
 * past the watermark point.  If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned int intel_calculate_wm(int pixel_rate,
				       const struct intel_watermark_params *wm,
				       int fifo_size, int cpp,
				       unsigned int latency_ns)
{
	int entries, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries = intel_wm_method1(pixel_rate, cpp,
				   latency_ns / 100);
	entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
		wm->guard_size;
	DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);

	wm_size = fifo_size - entries;
	DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
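
/*
 * Usage sketch (mirrors the Pineview self-refresh code further down):
 *
 *	wm = intel_calculate_wm(adjusted_mode->crtc_clock,
 *				&pineview_display_wm,
 *				pineview_display_wm.fifo_size,
 *				fb->format->cpp[0],
 *				latency->display_sr);
 *
 * The result is a FIFO fill level that is then packed into the
 * appropriate DSPFW field with FW_WM().
 */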

static bool is_disabling(int old, int new, int threshold)
{
	return old >= threshold && new < threshold;
}

static bool is_enabling(int old, int new, int threshold)
{
	return old < threshold && new >= threshold;
}

static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
{
	return dev_priv->wm.max_level + 1;
}

static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
				   const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);

	/* FIXME check the 'enable' instead */
	if (!crtc_state->base.active)
		return false;

	/*
	 * Treat cursor with fb as always visible since cursor updates
	 * can happen faster than the vrefresh rate, and the current
	 * watermark code doesn't handle that correctly. Cursor updates
	 * which set/clear the fb or change the cursor size are going
	 * to get throttled by intel_legacy_cursor_update() to work
	 * around this problem with the watermark code.
	 */
	if (plane->id == PLANE_CURSOR)
		return plane_state->base.fb != NULL;
	else
		return plane_state->base.visible;
}

static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc, *enabled = NULL;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pineview_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned int wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
					 dev_priv->is_ddr3,
					 dev_priv->fsb_freq,
					 dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp = fb->format->cpp[0];
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					4, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					4, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}

/*
 * Documentation says:
 * "If the line size is small, the TLB fetches can get in the way of the
 *  data fetches, causing some lag in the pixel data return which is not
 *  accounted for in the above formulas. The following adjustment only
 *  needs to be applied if eight whole lines fit in the buffer at once.
 *  The WM is adjusted upwards by the difference between the FIFO size
 *  and the size of 8 whole lines. This adjustment is always performed
 *  in the actual pixel depth regardless of whether FBC is enabled or not."
 */
static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
{
	int tlb_miss = fifo_size * 64 - width * cpp * 8;

	return max(0, tlb_miss);
}
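
/*
 * Worked example (hypothetical numbers): with a 511 cacheline FIFO and
 * a 256 pixel wide 4 bpp plane, eight whole lines occupy
 * 256 * 4 * 8 = 8192 bytes while the FIFO holds 511 * 64 = 32704
 * bytes, so the watermark is bumped by 32704 - 8192 = 24512 bytes.
 * For a 1920 pixel wide plane eight lines no longer fit in the FIFO
 * and no adjustment is applied.
 */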

static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
				const struct g4x_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	I915_WRITE(DSPFW2,
		   (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
		   FW_WM(wm->sr.fbc, FBC_SR) |
		   FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	I915_WRITE(DSPFW3,
		   (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
		   FW_WM(wm->sr.cursor, CURSOR_SR) |
		   FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
		   FW_WM(wm->hpll.plane, HPLL_SR));

	POSTING_READ(DSPFW1);
}

#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)

static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
				const struct vlv_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

		I915_WRITE(VLV_DDL(pipe),
			   (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
			   (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
	}

	/*
	 * Zero the (unused) WM1 watermarks, and also clear all the
	 * high order bits so that there are no out of bounds values
	 * present in the registers during the reprogramming.
	 */
	I915_WRITE(DSPHOWM, 0);
	I915_WRITE(DSPHOWM1, 0);
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	}

	POSTING_READ(DSPFW1);
}

#undef FW_WM_VLV

static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;

	dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
}

static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
{
	/*
	 * DSPCNTR[13] supposedly controls whether the
	 * primary plane can use the FIFO space otherwise
	 * reserved for the sprite plane. It's not 100% clear
	 * what the actual FIFO size is, but it looks like we
	 * can happily set both primary and sprite watermarks
	 * up to 127 cachelines. So that would seem to mean
	 * that either DSPCNTR[13] doesn't do anything, or that
	 * the total FIFO is >= 256 cachelines in size. Either
	 * way, we don't seem to have to worry about this
	 * repartitioning as the maximum watermark value the
	 * register can hold for each plane is lower than the
	 * minimum FIFO size.
	 */
	switch (plane_id) {
	case PLANE_CURSOR:
		return 63;
	case PLANE_PRIMARY:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
	case PLANE_SPRITE0:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
	default:
		MISSING_CASE(plane_id);
		return 0;
	}
}

static int g4x_fbc_fifo_size(int level)
{
	switch (level) {
	case G4X_WM_LEVEL_SR:
		return 7;
	case G4X_WM_LEVEL_HPLL:
		return 15;
	default:
		MISSING_CASE(level);
		return 0;
	}
}

static uint16_t g4x_compute_wm(const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state,
			       int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
	unsigned int clock, htotal, cpp, width, wm;

	if (latency == 0)
		return USHRT_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	/*
	 * Not 100% sure which way ELK should go here as the
	 * spec only says CL/CTG should assume 32bpp and BW
	 * doesn't need to. But as these things followed the
	 * mobile vs. desktop lines on gen3 as well, let's
	 * assume ELK doesn't need this.
	 *
	 * The spec also fails to list such a restriction for
	 * the HPLL watermark, which seems a little strange.
	 * Let's use 32bpp for the HPLL watermark as well.
	 */
	if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY &&
	    level != G4X_WM_LEVEL_NORMAL)
		cpp = 4;
	else
		cpp = plane_state->base.fb->format->cpp[0];

	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;

	if (plane->id == PLANE_CURSOR)
		width = plane_state->base.crtc_w;
	else
		width = drm_rect_width(&plane_state->base.dst);

	if (plane->id == PLANE_CURSOR) {
		wm = intel_wm_method2(clock, htotal, width, cpp, latency);
	} else if (plane->id == PLANE_PRIMARY &&
		   level == G4X_WM_LEVEL_NORMAL) {
		wm = intel_wm_method1(clock, cpp, latency);
	} else {
		unsigned int small, large;

		small = intel_wm_method1(clock, cpp, latency);
		large = intel_wm_method2(clock, htotal, width, cpp, latency);

		wm = min(small, large);
	}

	wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
			      width, cpp);

	wm = DIV_ROUND_UP(wm, 64) + 2;

	return min_t(unsigned int, wm, USHRT_MAX);
}

static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
				 int level, enum plane_id plane_id, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	bool dirty = false;

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

		dirty |= raw->plane[plane_id] != value;
		raw->plane[plane_id] = value;
	}

	return dirty;
}

static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
			       int level, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	bool dirty = false;

	/* NORMAL level doesn't have an FBC watermark */
	level = max(level, G4X_WM_LEVEL_SR);

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

		dirty |= raw->fbc != value;
		raw->fbc = value;
	}

	return dirty;
}

static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t pri_val);

static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
	enum plane_id plane_id = plane->id;
	bool dirty = false;
	int level;

	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
		dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
		if (plane_id == PLANE_PRIMARY)
			dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
		goto out;
	}

	for (level = 0; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
		int wm, max_wm;

		wm = g4x_compute_wm(crtc_state, plane_state, level);
		max_wm = g4x_plane_fifo_size(plane_id, level);

		if (wm > max_wm)
			break;

		dirty |= raw->plane[plane_id] != wm;
		raw->plane[plane_id] = wm;

		if (plane_id != PLANE_PRIMARY ||
		    level == G4X_WM_LEVEL_NORMAL)
			continue;

		wm = ilk_compute_fbc_wm(crtc_state, plane_state,
					raw->plane[plane_id]);
		max_wm = g4x_fbc_fifo_size(level);

		/*
		 * FBC wm is not mandatory as we
		 * can always just disable its use.
		 */
		if (wm > max_wm)
			wm = USHRT_MAX;

		dirty |= raw->fbc != wm;
		raw->fbc = wm;
	}

	/* mark watermarks as invalid */
	dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

	if (plane_id == PLANE_PRIMARY)
		dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

 out:
	if (dirty) {
		DRM_DEBUG_KMS("%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
			      plane->base.name,
			      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
			      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
			      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);

		if (plane_id == PLANE_PRIMARY)
			DRM_DEBUG_KMS("FBC watermarks: SR=%d, HPLL=%d\n",
				      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
				      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
	}

	return dirty;
}

static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
				      enum plane_id plane_id, int level)
{
	const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

	return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
}

static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	if (level > dev_priv->wm.max_level)
		return false;

	return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}

/* mark all levels starting from 'level' as invalid */
static void g4x_invalidate_wms(struct intel_crtc *crtc,
			       struct g4x_wm_state *wm_state, int level)
{
	if (level <= G4X_WM_LEVEL_NORMAL) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id)
			wm_state->wm.plane[plane_id] = USHRT_MAX;
	}

	if (level <= G4X_WM_LEVEL_SR) {
		wm_state->cxsr = false;
		wm_state->sr.cursor = USHRT_MAX;
		wm_state->sr.plane = USHRT_MAX;
		wm_state->sr.fbc = USHRT_MAX;
	}

	if (level <= G4X_WM_LEVEL_HPLL) {
		wm_state->hpll_en = false;
		wm_state->hpll.cursor = USHRT_MAX;
		wm_state->hpll.plane = USHRT_MAX;
		wm_state->hpll.fbc = USHRT_MAX;
	}
}

static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->base.state);
	struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
	int num_active_planes = hweight32(crtc_state->active_planes &
					  ~BIT(PLANE_CURSOR));
	const struct g4x_pipe_wm *raw;
	const struct intel_plane_state *old_plane_state;
	const struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	enum plane_id plane_id;
	int i, level;
	unsigned int dirty = 0;

	for_each_oldnew_intel_plane_in_state(state, plane,
					     old_plane_state,
					     new_plane_state, i) {
		if (new_plane_state->base.crtc != &crtc->base &&
		    old_plane_state->base.crtc != &crtc->base)
			continue;

		if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
			dirty |= BIT(plane->id);
	}

	if (!dirty)
		return 0;

	level = G4X_WM_LEVEL_NORMAL;
	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	for_each_plane_id_on_crtc(crtc, plane_id)
		wm_state->wm.plane[plane_id] = raw->plane[plane_id];

	level = G4X_WM_LEVEL_SR;

	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
	wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
	wm_state->sr.fbc = raw->fbc;

	wm_state->cxsr = num_active_planes == BIT(PLANE_PRIMARY);

	level = G4X_WM_LEVEL_HPLL;

	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
	wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
	wm_state->hpll.fbc = raw->fbc;

	wm_state->hpll_en = wm_state->cxsr;

	level++;

 out:
	if (level == G4X_WM_LEVEL_NORMAL)
		return -EINVAL;

	/* invalidate the higher levels */
	g4x_invalidate_wms(crtc, wm_state, level);

	/*
	 * Determine if the FBC watermark(s) can be used. If
	 * this isn't the case we prefer to disable the FBC
	 * watermark(s) rather than disable the SR/HPLL
	 * level(s) entirely.
	 */
	wm_state->fbc_en = level > G4X_WM_LEVEL_NORMAL;

	if (level >= G4X_WM_LEVEL_SR &&
	    wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
		wm_state->fbc_en = false;
	else if (level >= G4X_WM_LEVEL_HPLL &&
		 wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
		wm_state->fbc_en = false;

	return 0;
}

static int g4x_compute_intermediate_wm(struct drm_device *dev,
				       struct intel_crtc *crtc,
				       struct intel_crtc_state *new_crtc_state)
{
	struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
	const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(new_crtc_state->base.state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(intel_state, crtc);
	const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
	enum plane_id plane_id;

	if (!new_crtc_state->base.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->base)) {
		*intermediate = *optimal;

		intermediate->cxsr = false;
		intermediate->hpll_en = false;
		goto out;
	}

	intermediate->cxsr = optimal->cxsr && active->cxsr &&
		!new_crtc_state->disable_cxsr;
	intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
		!new_crtc_state->disable_cxsr;
	intermediate->fbc_en = optimal->fbc_en && active->fbc_en;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		intermediate->wm.plane[plane_id] =
			max(optimal->wm.plane[plane_id],
			    active->wm.plane[plane_id]);

		WARN_ON(intermediate->wm.plane[plane_id] >
			g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
	}

	intermediate->sr.plane = max(optimal->sr.plane,
				     active->sr.plane);
	intermediate->sr.cursor = max(optimal->sr.cursor,
				      active->sr.cursor);
	intermediate->sr.fbc = max(optimal->sr.fbc,
				   active->sr.fbc);

	intermediate->hpll.plane = max(optimal->hpll.plane,
				       active->hpll.plane);
	intermediate->hpll.cursor = max(optimal->hpll.cursor,
					active->hpll.cursor);
	intermediate->hpll.fbc = max(optimal->hpll.fbc,
				     active->hpll.fbc);

	WARN_ON((intermediate->sr.plane >
		 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
		 intermediate->sr.cursor >
		 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
		intermediate->cxsr);
	WARN_ON((intermediate->sr.plane >
		 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
		 intermediate->sr.cursor >
		 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
		intermediate->hpll_en);

	WARN_ON(intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
		intermediate->fbc_en && intermediate->cxsr);
	WARN_ON(intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
		intermediate->fbc_en && intermediate->hpll_en);

out:
	/*
	 * If our intermediate WMs are identical to the final WMs, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
		new_crtc_state->wm.need_postvbl_update = true;

	return 0;
}

static void g4x_merge_wm(struct drm_i915_private *dev_priv,
			 struct g4x_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->cxsr = true;
	wm->hpll_en = true;
	wm->fbc_en = true;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;
		if (!wm_state->hpll_en)
			wm->hpll_en = false;
		if (!wm_state->fbc_en)
			wm->fbc_en = false;

		num_active_crtcs++;
	}

	if (num_active_crtcs != 1) {
		wm->cxsr = false;
		wm->hpll_en = false;
		wm->fbc_en = false;
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
		enum pipe pipe = crtc->pipe;

		wm->pipe[pipe] = wm_state->wm;
		if (crtc->active && wm->cxsr)
			wm->sr = wm_state->sr;
		if (crtc->active && wm->hpll_en)
			wm->hpll = wm_state->hpll;
	}
}

static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct g4x_wm_values *old_wm = &dev_priv->wm.g4x;
	struct g4x_wm_values new_wm = {};

	g4x_merge_wm(dev_priv, &new_wm);

	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
		return;

	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, false);

	g4x_write_wm_values(dev_priv, &new_wm);

	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, true);

	*old_wm = new_wm;
}

static void g4x_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
	g4x_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void g4x_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);

	if (!crtc_state->wm.need_postvbl_update)
		return;

	mutex_lock(&dev_priv->wm.wm_mutex);
	intel_crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
	g4x_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}

/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int htotal,
				   unsigned int width,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = intel_wm_method2(pixel_rate, htotal,
			       width, cpp, latency);
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}
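
/*
 * Worked example (illustrative): with a 148500 kHz pixel clock,
 * htotal = 2200 and the 3 usec PM2 latency (30 in 0.1us units), the
 * whole-line count (30 * 148500) / (2200 * 10000) is 0, so a 1920
 * pixel wide 4 bpp plane needs (0 + 1) * 1920 * 4 = 7680 bytes, which
 * DIV_ROUND_UP(7680, 64) converts to 120 cachelines.
 */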

static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}

static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state,
				     int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	unsigned int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->base.fb->format->cpp[0];
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	width = crtc_state->pipe_src_w;

	if (plane->id == PLANE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
1631
		wm = vlv_wm_method2(clock, htotal, width, cpp,
1632 1633 1634
				    dev_priv->wm.pri_latency[level] * 10);
	}

1635
	return min_t(unsigned int, wm, USHRT_MAX);
1636 1637
}

static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
{
	return (active_planes & (BIT(PLANE_SPRITE0) |
				 BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
}

static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	const struct g4x_pipe_wm *raw =
		&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
	int num_active_planes = hweight32(active_planes);
	const int fifo_size = 511;
	int fifo_extra, fifo_left = fifo_size;
	int sprite0_fifo_extra = 0;
	unsigned int total_rate;
	enum plane_id plane_id;

	/*
	 * When enabling sprite0 after sprite1 has already been enabled
	 * we tend to get an underrun unless sprite0 already has some
	 * FIFO space allocated. Hence we always allocate at least one
	 * cacheline for sprite0 whenever sprite1 is enabled.
	 *
	 * All other plane enable sequences appear immune to this problem.
	 */
	if (vlv_need_sprite0_fifo_workaround(active_planes))
		sprite0_fifo_extra = 1;

	total_rate = raw->plane[PLANE_PRIMARY] +
		raw->plane[PLANE_SPRITE0] +
		raw->plane[PLANE_SPRITE1] +
		sprite0_fifo_extra;

	if (total_rate > fifo_size)
		return -EINVAL;

	if (total_rate == 0)
		total_rate = 1;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		unsigned int rate;

		if ((active_planes & BIT(plane_id)) == 0) {
			fifo_state->plane[plane_id] = 0;
			continue;
		}

		rate = raw->plane[plane_id];
		fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
		fifo_left -= fifo_state->plane[plane_id];
	}

	fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
	fifo_left -= sprite0_fifo_extra;

	fifo_state->plane[PLANE_CURSOR] = 63;

	fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_plane_id_on_crtc(crtc, plane_id) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if ((active_planes & BIT(plane_id)) == 0)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		fifo_state->plane[plane_id] += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(active_planes != 0 && fifo_left != 0);

	/* give it all to the first plane if none are active */
	if (active_planes == 0) {
		WARN_ON(fifo_left != fifo_size);
		fifo_state->plane[PLANE_PRIMARY] = fifo_left;
	}

	return 0;
}
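/*
 * Worked example for the proportional split above (hypothetical
 * rates, not from hardware): with raw PM2 rates primary = 120,
 * sprite0 = 60 and sprite1 disabled, total_rate = 180, so the
 * primary gets 511 * 120 / 180 = 340 lines and sprite0 gets
 * 511 * 60 / 180 = 170 lines. The single leftover line is then
 * handed out by the "spread the remainder evenly" loop, giving
 * the primary 341 lines in total.
 */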

/* mark all levels starting from 'level' as invalid */
static void vlv_invalidate_wms(struct intel_crtc *crtc,
			       struct vlv_wm_state *wm_state, int level)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id)
			wm_state->wm[level].plane[plane_id] = USHRT_MAX;

		wm_state->sr[level].cursor = USHRT_MAX;
		wm_state->sr[level].plane = USHRT_MAX;
	}
}

static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
{
	if (wm > fifo_size)
		return USHRT_MAX;
	else
		return fifo_size - wm;
}
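/*
 * The values computed so far are "FIFO lines used"; the wm state
 * keeps them in the inverted form produced here, so a usage of
 * e.g. 120 lines against a 511 line FIFO is stored as 391, and
 * anything that would not fit is flagged as USHRT_MAX.
 */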

/*
 * Starting from 'level' set all higher
 * levels to 'value' in the "raw" watermarks.
 */
static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
				 int level, enum plane_id plane_id, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	int num_levels = intel_wm_num_levels(dev_priv);
	bool dirty = false;

	for (; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];

		dirty |= raw->plane[plane_id] != value;
		raw->plane[plane_id] = value;
	}

	return dirty;
}

static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	enum plane_id plane_id = plane->id;
	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
	int level;
	bool dirty = false;

	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
		dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
		goto out;
	}

	for (level = 0; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
		int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
		int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;

		if (wm > max_wm)
			break;

		dirty |= raw->plane[plane_id] != wm;
		raw->plane[plane_id] = wm;
	}

	/* mark all higher levels as invalid */
	dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

out:
	if (dirty)
		DRM_DEBUG_KMS("%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
			      plane->base.name,
			      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
			      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
			      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);

	return dirty;
}

static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
				      enum plane_id plane_id, int level)
{
	const struct g4x_pipe_wm *raw =
		&crtc_state->wm.vlv.raw[level];
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;

	return raw->plane[plane_id] <= fifo_state->plane[plane_id];
}

static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
{
	return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}

static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->base.state);
	struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	int num_active_planes = hweight32(crtc_state->active_planes &
					  ~BIT(PLANE_CURSOR));
	bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->base);
	const struct intel_plane_state *old_plane_state;
	const struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	enum plane_id plane_id;
	int level, ret, i;
	unsigned int dirty = 0;

	for_each_oldnew_intel_plane_in_state(state, plane,
					     old_plane_state,
					     new_plane_state, i) {
		if (new_plane_state->base.crtc != &crtc->base &&
		    old_plane_state->base.crtc != &crtc->base)
			continue;

		if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
			dirty |= BIT(plane->id);
	}

	/*
	 * DSPARB registers may have been reset due to the
	 * power well being turned off. Make sure we restore
	 * them to a consistent state even if no primary/sprite
	 * planes are initially active.
	 */
	if (needs_modeset)
		crtc_state->fifo_changed = true;

	if (!dirty)
		return 0;

	/* cursor changes don't warrant a FIFO recompute */
	if (dirty & ~BIT(PLANE_CURSOR)) {
		const struct intel_crtc_state *old_crtc_state =
			intel_atomic_get_old_crtc_state(state, crtc);
		const struct vlv_fifo_state *old_fifo_state =
			&old_crtc_state->wm.vlv.fifo_state;

		ret = vlv_compute_fifo(crtc_state);
		if (ret)
			return ret;

		if (needs_modeset ||
		    memcmp(old_fifo_state, fifo_state,
			   sizeof(*fifo_state)) != 0)
			crtc_state->fifo_changed = true;
	}

	/* initially allow all levels */
	wm_state->num_levels = intel_wm_num_levels(dev_priv);
	/*
	 * Note that enabling cxsr with no primary/sprite planes
	 * enabled can wedge the pipe. Hence we only allow cxsr
	 * with exactly one enabled primary/sprite plane.
	 */
	wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;

	for (level = 0; level < wm_state->num_levels; level++) {
		const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
		const int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;

		if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
			break;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			wm_state->wm[level].plane[plane_id] =
				vlv_invert_wm_value(raw->plane[plane_id],
						    fifo_state->plane[plane_id]);
		}

		wm_state->sr[level].plane =
			vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
						 raw->plane[PLANE_SPRITE0],
						 raw->plane[PLANE_SPRITE1]),
					    sr_fifo_size);

		wm_state->sr[level].cursor =
			vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
					    63);
	}

	if (level == 0)
		return -EINVAL;

	/* limit to only levels we can actually handle */
	wm_state->num_levels = level;

	/* invalidate the higher levels */
	vlv_invalidate_wms(crtc, wm_state, level);

	return 0;
}

#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)

static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
				   struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	int sprite0_start, sprite1_start, fifo_size;

	if (!crtc_state->fifo_changed)
		return;

	sprite0_start = fifo_state->plane[PLANE_PRIMARY];
	sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
	fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;

	WARN_ON(fifo_state->plane[PLANE_CURSOR] != 63);
	WARN_ON(fifo_size != 511);

	trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);

	/*
	 * uncore.lock serves a double purpose here. It allows us to
	 * use the less expensive I915_{READ,WRITE}_FW() functions, and
	 * it protects the DSPARB registers from getting clobbered by
	 * parallel updates from multiple pipes.
	 *
	 * intel_pipe_update_start() has already disabled interrupts
	 * for us, so a plain spin_lock() is sufficient here.
	 */
	spin_lock(&dev_priv->uncore.lock);

	switch (crtc->pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ_FW(DSPARB);
		dsparb2 = I915_READ_FW(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			   VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		I915_WRITE_FW(DSPARB, dsparb);
		I915_WRITE_FW(DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = I915_READ_FW(DSPARB);
		dsparb2 = I915_READ_FW(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			   VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		I915_WRITE_FW(DSPARB, dsparb);
		I915_WRITE_FW(DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = I915_READ_FW(DSPARB3);
		dsparb2 = I915_READ_FW(DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			   VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		I915_WRITE_FW(DSPARB3, dsparb3);
		I915_WRITE_FW(DSPARB2, dsparb2);
		break;
	default:
		break;
	}

	POSTING_READ_FW(DSPARB);

	spin_unlock(&dev_priv->uncore.lock);
}
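/*
 * The FIFO split points above are 9 bit values: the low 8 bits of
 * each boundary live in DSPARB/DSPARB3 and the 9th bit (the >> 8
 * above) in the *_HI fields of DSPARB2, which is why both registers
 * must be updated together under the same lock.
 */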

#undef VLV_FIFO

static int vlv_compute_intermediate_wm(struct drm_device *dev,
				       struct intel_crtc *crtc,
				       struct intel_crtc_state *new_crtc_state)
{
	struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
	const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(new_crtc_state->base.state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(intel_state, crtc);
	const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
	int level;

	if (!new_crtc_state->base.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->base)) {
		*intermediate = *optimal;

		intermediate->cxsr = false;
		goto out;
	}

	intermediate->num_levels = min(optimal->num_levels, active->num_levels);
	intermediate->cxsr = optimal->cxsr && active->cxsr &&
		!new_crtc_state->disable_cxsr;

	for (level = 0; level < intermediate->num_levels; level++) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			intermediate->wm[level].plane[plane_id] =
				min(optimal->wm[level].plane[plane_id],
				    active->wm[level].plane[plane_id]);
		}

		intermediate->sr[level].plane = min(optimal->sr[level].plane,
						    active->sr[level].plane);
		intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
						     active->sr[level].cursor);
	}

	vlv_invalidate_wms(crtc, intermediate, level);

out:
	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
		new_crtc_state->wm.need_postvbl_update = true;

	return 0;
}
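/*
 * Note that the min() in vlv_compute_intermediate_wm() is correct
 * because the values being merged are already in the inverted form
 * produced by vlv_invert_wm_value(): a smaller value corresponds to
 * a larger FIFO usage estimate, so taking the minimum yields a
 * watermark that is safe for both the old and the new state.
 */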

static void vlv_merge_wm(struct drm_i915_private *dev_priv,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->level = dev_priv->wm.max_level;
	wm->cxsr = true;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;

		num_active_crtcs++;
		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_crtcs != 1)
		wm->cxsr = false;

	if (num_active_crtcs > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
		enum pipe pipe = crtc->pipe;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (crtc->active && wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
	}
}

static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
	struct vlv_wm_values new_wm = {};

	vlv_merge_wm(dev_priv, &new_wm);

	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
		return;

	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
		chv_set_memory_dvfs(dev_priv, false);

	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
		chv_set_memory_pm5(dev_priv, false);

	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, false);

	vlv_write_wm_values(dev_priv, &new_wm);

	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, true);

	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
		chv_set_memory_pm5(dev_priv, true);

	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
		chv_set_memory_dvfs(dev_priv, true);

	*old_wm = new_wm;
}
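/*
 * As with the g4x path, the ordering above is deliberate: DDR DVFS,
 * PM5 and cxsr are dropped before the new values are written when
 * the new state no longer allows them, and only (re)enabled after
 * the write when the new state does, so the deeper power states are
 * never active with watermarks that were not computed for them.
 */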

static void vlv_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
	vlv_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void vlv_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);

	if (!crtc_state->wm.need_postvbl_update)
		return;

	mutex_lock(&dev_priv->wm.wm_mutex);
	intel_crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
	vlv_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void i965_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = crtc->config->pipe_src_w;
		int cpp = fb->format->cpp[0];
		int entries;

		entries = intel_wm_method2(clock, htotal,
					   hdisplay, cpp, sr_latency_ns / 100);
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = intel_wm_method2(clock, htotal,
					   crtc->base.cursor->state->crtc_w, 4,
					   sr_latency_ns / 100);
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size) +
			i965_cursor_wm_info.guard_size;

		cursor_sr = i965_cursor_wm_info.fifo_size - entries;
		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
		   FW_WM(8, CURSORB) |
		   FW_WM(8, PLANEB) |
		   FW_WM(8, PLANEA));
	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

#undef FW_WM

static void i9xx_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct intel_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev_priv))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev_priv))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_A);
	crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		if (IS_GEN2(dev_priv))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev_priv))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B);
	crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		if (IS_GEN2(dev_priv))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev_priv) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->base.primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (!i915_gem_object_is_tiled(obj))
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev_priv) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&enabled->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			enabled->base.primary->state->fb;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = enabled->config->pipe_src_w;
		int cpp;
		int entries;

		if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		entries = intel_wm_method2(clock, htotal, hdisplay, cpp,
					   sr_latency_ns / 100);
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i845_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev_priv);
	if (crtc == NULL)
		return;

	adjusted_mode = &crtc->config->base.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev_priv, PLANE_A),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}

/* latency must be in 0.1us units. */
static unsigned int ilk_wm_method1(unsigned int pixel_rate,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = intel_wm_method1(pixel_rate, cpp, latency);
	ret = DIV_ROUND_UP(ret, 64) + 2;

	return ret;
}
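/*
 * Worked example (illustrative): assuming intel_wm_method1()
 * computes DIV_ROUND_UP(pixel_rate * cpp * latency, 10000) bytes,
 * as defined earlier in this file, a hypothetical 148500 kHz pixel
 * clock, cpp 4 and a 2 usec latency (latency == 20) give 1188
 * bytes, i.e. DIV_ROUND_UP(1188, 64) + 2 = 21 FIFO lines.
 */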

/* latency must be in 0.1us units. */
static unsigned int ilk_wm_method2(unsigned int pixel_rate,
				   unsigned int htotal,
				   unsigned int width,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = intel_wm_method2(pixel_rate, htotal,
			       width, cpp, latency);
	ret = DIV_ROUND_UP(ret, 64) + 2;

	return ret;
}

static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t cpp)
{
	/*
	 * Neither of these should be possible since this function shouldn't be
	 * called if the CRTC is off or the plane is invisible.  But let's be
	 * extra paranoid to avoid a potential divide-by-zero if we screw up
	 * elsewhere in the driver.
	 */
	if (WARN_ON(!cpp))
		return 0;
	if (WARN_ON(!horiz_pixels))
		return 0;

	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
}
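/*
 * Example of the FBC watermark calculation above (hypothetical
 * numbers): a primary watermark of 128 cachelines is 128 * 64 =
 * 8192 bytes, and with a 1920 pixel wide, 4 bytes per pixel plane
 * (7680 bytes per line) this yields DIV_ROUND_UP(8192, 7680) + 2 =
 * 4 as the FBC watermark.
 */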

struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value,
				   bool is_lp)
{
	uint32_t method1, method2;
	int cpp;

	if (!intel_wm_plane_visible(cstate, pstate))
		return 0;

	cpp = pstate->base.fb->format->cpp[0];

	method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(cstate->pixel_rate,
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->base.dst),
				 cpp, mem_value);

	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	uint32_t method1, method2;
	int cpp;

	if (!intel_wm_plane_visible(cstate, pstate))
		return 0;

	cpp = pstate->base.fb->format->cpp[0];

	method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
	method2 = ilk_wm_method2(cstate->pixel_rate,
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->base.dst),
				 cpp, mem_value);
	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	int cpp;

	if (!intel_wm_plane_visible(cstate, pstate))
		return 0;

	cpp = pstate->base.fb->format->cpp[0];

	return ilk_wm_method2(cstate->pixel_rate,
			      cstate->base.adjusted_mode.crtc_htotal,
			      pstate->base.crtc_w, cpp, mem_value);
}

/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t pri_val)
{
	int cpp;

	if (!intel_wm_plane_visible(cstate, pstate))
		return 0;

	cpp = pstate->base.fb->format->cpp[0];

	return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
}

static unsigned int
ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 8)
		return 3072;
	else if (INTEL_GEN(dev_priv) >= 7)
		return 768;
	else
		return 512;
}

static unsigned int
ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
		     int level, bool is_sprite)
{
	if (INTEL_GEN(dev_priv) >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_GEN(dev_priv) >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}

static unsigned int
ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
{
	if (INTEL_GEN(dev_priv) >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}

static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 8)
		return 31;
	else
		return 15;
}

/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned int fifo_size = ilk_display_fifo_size(dev_priv);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev_priv)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_GEN(dev_priv) <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
}
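/*
 * Example of the FIFO split logic above (IVB-style numbers): with a
 * 768 entry FIFO, a single active pipe, sprites enabled and a 5:6
 * partitioning at level 1+, the sprite side gets 768 * 5 / 6 = 640
 * entries and the primary side 768 / 6 = 128, both comfortably
 * under the 1023 entry register limit for LP watermarks.
 */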

/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(to_i915(dev), level);
}

static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(to_i915(dev));
}

static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
	max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
	max->fbc = ilk_fbc_wm_reg_max(dev_priv);
}

static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}

static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 const struct intel_crtc *intel_crtc,
				 int level,
				 struct intel_crtc_state *cstate,
				 const struct intel_plane_state *pristate,
				 const struct intel_plane_state *sprstate,
				 const struct intel_plane_state *curstate,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	if (pristate) {
		result->pri_val = ilk_compute_pri_wm(cstate, pristate,
						     pri_latency, level);
		result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
	}

	if (sprstate)
		result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);

	if (curstate)
		result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);

	result->enable = true;
}
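/*
 * The latency scaling above keeps all inputs to the ilk_compute_*_wm()
 * helpers in the 0.1us units they document: a raw WM1+ value of 4
 * (0.5us units, i.e. 2 usec) becomes 4 * 5 = 20.
 */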

static uint32_t
hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
{
	const struct intel_atomic_state *intel_state =
		to_intel_atomic_state(cstate->base.state);
	const struct drm_display_mode *adjusted_mode =
		&cstate->base.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!cstate->base.active)
		return 0;
	if (WARN_ON(adjusted_mode->crtc_clock == 0))
		return 0;
	if (WARN_ON(intel_state->cdclk.logical.cdclk == 0))
		return 0;

	/*
	 * The watermarks are computed based on how long it takes
	 * to fill a single row at the given clock rate, multiplied by 8.
	 */
	linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
				     adjusted_mode->crtc_clock);
	ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
					 intel_state->cdclk.logical.cdclk);

	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
	       PIPE_WM_LINETIME_TIME(linetime);
}

static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
				  uint16_t wm[8])
{
	if (INTEL_GEN(dev_priv) >= 9) {
		uint32_t val;
		int ret, i;
		int level, max_level = ilk_wm_max_level(dev_priv);

		/* read the first set of memory latencies[0:3] */
		val = 0; /* data0 to be programmed to 0 for first set */
		mutex_lock(&dev_priv->pcu_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->pcu_lock);

		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;

		/* read the second set of memory latencies[4:7] */
		val = 1; /* data0 to be programmed to 1 for second set */
		mutex_lock(&dev_priv->pcu_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->pcu_lock);
		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;

		/*
		 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
		 * need to be disabled. We make sure to sanitize the values out
		 * of the punit to satisfy this requirement.
		 */
		for (level = 1; level <= max_level; level++) {
			if (wm[level] == 0) {
				for (i = level + 1; i <= max_level; i++)
					wm[i] = 0;
				break;
			}
		}

		/*
		 * WaWmMemoryReadLatency:skl+,glk
		 *
		 * punit doesn't take into account the read latency so we need
		 * to add 2us to the various latency levels we retrieve from the
		 * punit when the level 0 response data is 0us.
		 */
		if (wm[0] == 0) {
			wm[0] += 2;
			for (level = 1; level <= max_level; level++) {
				if (wm[level] == 0)
					break;
				wm[level] += 2;
			}
		}

	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		uint64_t sskpd = I915_READ64(MCH_SSKPD);

		wm[0] = (sskpd >> 56) & 0xFF;
		if (wm[0] == 0)
			wm[0] = sskpd & 0xF;
		wm[1] = (sskpd >> 4) & 0xFF;
		wm[2] = (sskpd >> 12) & 0xFF;
		wm[3] = (sskpd >> 20) & 0x1FF;
		wm[4] = (sskpd >> 32) & 0x1FF;
	} else if (INTEL_GEN(dev_priv) >= 6) {
		uint32_t sskpd = I915_READ(MCH_SSKPD);

		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		uint32_t mltr = I915_READ(MLTR_ILK);

		/* ILK primary LP0 latency is 700 ns */
		wm[0] = 7;
		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
	} else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
	}
}

static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
				       uint16_t wm[5])
{
	/* ILK sprite LP0 latency is 1300 ns */
	if (IS_GEN5(dev_priv))
		wm[0] = 13;
}

static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
				       uint16_t wm[5])
{
	/* ILK cursor LP0 latency is 1300 ns */
	if (IS_GEN5(dev_priv))
		wm[0] = 13;
}

int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
{
	/* how many WM levels are we expecting */
	if (INTEL_GEN(dev_priv) >= 9)
		return 7;
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		return 4;
	else if (INTEL_GEN(dev_priv) >= 6)
		return 3;
	else
		return 2;
}

static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
				   const char *name,
				   const uint16_t wm[8])
{
	int level, max_level = ilk_wm_max_level(dev_priv);

	for (level = 0; level <= max_level; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			DRM_DEBUG_KMS("%s WM%d latency not provided\n",
				      name, level);
			continue;
		}

		/*
		 * - latencies are in us on gen9.
		 * - before then, WM1+ latency values are in 0.5us units
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
			      name, level, wm[level],
			      latency / 10, latency % 10);
	}
}

static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
				    uint16_t wm[5], uint16_t min)
{
	int level, max_level = ilk_wm_max_level(dev_priv);

	if (wm[0] >= min)
		return false;

	wm[0] = max(wm[0], min);
	for (level = 1; level <= max_level; level++)
		wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));

	return true;
}
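/*
 * Example: snb_wm_latency_quirk() below calls this with min == 12
 * (1.2 usec in the 0.1us units of wm[0]). A BIOS-provided wm[0] of
 * 5 is then raised to 12, and every WM1+ value is raised to at
 * least DIV_ROUND_UP(12, 5) = 3, i.e. 1.5 usec in 0.5us units.
 */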

static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
{
	bool changed;

	/*
	 * The BIOS provided WM memory latency values are often
	 * inadequate for high resolution displays. Adjust them.
	 */
	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);

	if (!changed)
		return;

	DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
}

static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);

	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));
	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));

	intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
	intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);

	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);

	if (IS_GEN6(dev_priv))
		snb_wm_latency_quirk(dev_priv);
}

static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
	intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
}

static bool ilk_validate_pipe_wm(struct drm_device *dev,
				 struct intel_pipe_wm *pipe_wm)
{
	/* LP0 watermark maximums depend on this pipe alone */
	const struct intel_wm_config config = {
		.num_pipes_active = 1,
		.sprites_enabled = pipe_wm->sprites_enabled,
		.sprites_scaled = pipe_wm->sprites_scaled,
	};
	struct ilk_wm_maximums max;

	/* LP0 watermarks always use 1/2 DDB partitioning */
	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);

	/* At least LP0 must be valid */
	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
		DRM_DEBUG_KMS("LP0 watermark invalid\n");
		return false;
	}

	return true;
}

/* Compute new watermarks for the pipe */
static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct intel_pipe_wm *pipe_wm;
	struct drm_device *dev = state->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_plane *plane;
	const struct drm_plane_state *plane_state;
	const struct intel_plane_state *pristate = NULL;
	const struct intel_plane_state *sprstate = NULL;
	const struct intel_plane_state *curstate = NULL;
	int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
	struct ilk_wm_maximums max;

	pipe_wm = &cstate->wm.ilk.optimal;

	drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) {
		const struct intel_plane_state *ps = to_intel_plane_state(plane_state);

		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
			pristate = ps;
		else if (plane->type == DRM_PLANE_TYPE_OVERLAY)
			sprstate = ps;
		else if (plane->type == DRM_PLANE_TYPE_CURSOR)
			curstate = ps;
	}

	pipe_wm->pipe_enabled = cstate->base.active;
	if (sprstate) {
		pipe_wm->sprites_enabled = sprstate->base.visible;
		pipe_wm->sprites_scaled = sprstate->base.visible &&
			(drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 ||
			 drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16);
	}

	usable_level = max_level;

	/* ILK/SNB: LP2+ watermarks only w/o sprites */
	if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
		usable_level = 1;

	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
	if (pipe_wm->sprites_scaled)
		usable_level = 0;

	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
			     pristate, sprstate, curstate, &pipe_wm->wm[0]);

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		pipe_wm->linetime = hsw_compute_linetime_wm(cstate);

	if (!ilk_validate_pipe_wm(dev, pipe_wm))
		return -EINVAL;

	ilk_compute_wm_reg_maximums(dev_priv, 1, &max);

	for (level = 1; level <= usable_level; level++) {
		struct intel_wm_level *wm = &pipe_wm->wm[level];

		ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
				     pristate, sprstate, curstate, wm);

		/*
		 * Disable any watermark level that exceeds the
		 * register maximums since such watermarks are
		 * always invalid.
		 */
		if (!ilk_validate_wm_level(level, &max, wm)) {
			memset(wm, 0, sizeof(*wm));
			break;
		}
	}

	return 0;
}

/*
 * Build a set of 'intermediate' watermark values that satisfy both the old
 * state and the new state.  These can be programmed to the hardware
 * immediately.
 */
static int ilk_compute_intermediate_wm(struct drm_device *dev,
				       struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *newstate)
{
	struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(newstate->base.state);
	const struct intel_crtc_state *oldstate =
		intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
	const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
	int level, max_level = ilk_wm_max_level(to_i915(dev));

	/*
	 * Start with the final, target watermarks, then combine with the
	 * currently active watermarks to get values that are safe both before
	 * and after the vblank.
	 */
	*a = newstate->wm.ilk.optimal;
	if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base))
		return 0;

	a->pipe_enabled |= b->pipe_enabled;
	a->sprites_enabled |= b->sprites_enabled;
	a->sprites_scaled |= b->sprites_scaled;

	for (level = 0; level <= max_level; level++) {
		struct intel_wm_level *a_wm = &a->wm[level];
		const struct intel_wm_level *b_wm = &b->wm[level];

		a_wm->enable &= b_wm->enable;
		a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
		a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
		a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
		a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
	}

	/*
	 * We need to make sure that these merged watermark values are
	 * actually a valid configuration themselves.  If they're not,
	 * there's no safe way to transition from the old state to
	 * the new state, so we need to fail the atomic transaction.
	 */
	if (!ilk_validate_pipe_wm(dev, a))
		return -EINVAL;

	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) != 0)
		newstate->wm.need_postvbl_update = true;

	return 0;
}

/*
 * Merge the watermarks from all active pipes for a specific level.
 */
static void ilk_merge_wm_level(struct drm_device *dev,
			       int level,
			       struct intel_wm_level *ret_wm)
{
	const struct intel_crtc *intel_crtc;

	ret_wm->enable = true;

	for_each_intel_crtc(dev, intel_crtc) {
		const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
		const struct intel_wm_level *wm = &active->wm[level];

		if (!active->pipe_enabled)
			continue;

		/*
		 * The watermark values may have been used in the past,
		 * so we must maintain them in the registers for some
		 * time even if the level is now disabled.
		 */
		if (!wm->enable)
			ret_wm->enable = false;

		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
	}
}

/*
 * Merge all low power watermarks for all active pipes.
 */
static void ilk_wm_merge(struct drm_device *dev,
			 const struct intel_wm_config *config,
			 const struct ilk_wm_maximums *max,
			 struct intel_pipe_wm *merged)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	int last_enabled_level = max_level;

	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
	if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
	    config->num_pipes_active > 1)
		last_enabled_level = 0;

	/* ILK: FBC WM must be disabled always */
	merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;

	/* merge each WM1+ level */
	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &merged->wm[level];

		ilk_merge_wm_level(dev, level, wm);

		if (level > last_enabled_level)
			wm->enable = false;
		else if (!ilk_validate_wm_level(level, max, wm))
			/* make sure all following levels get disabled */
			last_enabled_level = level - 1;

		/*
		 * The spec says it is preferred to disable
		 * FBC WMs instead of disabling a WM level.
		 */
		if (wm->fbc_val > max->fbc) {
			if (wm->enable)
				merged->fbc_wm_enabled = false;
			wm->fbc_val = 0;
		}
	}

	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
	/*
	 * FIXME this is racy. FBC might get enabled later.
	 * What we should check here is whether FBC can be
	 * enabled sometime later.
	 */
	if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled &&
	    intel_fbc_is_active(dev_priv)) {
		for (level = 2; level <= max_level; level++) {
			struct intel_wm_level *wm = &merged->wm[level];

			wm->enable = false;
		}
	}
}

static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
{
	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
}
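/*
 * Example: when wm[4] is enabled the three LP registers map to
 * levels 1, 3 and 4 (platforms where a 5th level exists, see
 * ilk_wm_max_level()); otherwise they map to levels 1, 2 and 3.
 */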

/* The value we need to program into the WM_LPx latency field */
static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		return 2 * level;
	else
		return dev_priv->wm.pri_latency[level];
}

3298
static void ilk_compute_wm_results(struct drm_device *dev,
3299
				   const struct intel_pipe_wm *merged,
3300
				   enum intel_ddb_partitioning partitioning,
3301
				   struct ilk_wm_values *results)
3302
{
3303
	struct drm_i915_private *dev_priv = to_i915(dev);
3304 3305
	struct intel_crtc *intel_crtc;
	int level, wm_lp;

	results->enable_fbc_wm = merged->fbc_wm_enabled;
	results->partitioning = partitioning;

	/* LP1+ register values */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		const struct intel_wm_level *r;

		level = ilk_wm_lp_to_level(wm_lp, merged);

		r = &merged->wm[level];

		/*
		 * Maintain the watermark values even if the level is
		 * disabled. Doing otherwise could cause underruns.
		 */
		results->wm_lp[wm_lp - 1] =
			(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
			(r->pri_val << WM1_LP_SR_SHIFT) |
			r->cur_val;

		if (r->enable)
			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;

		if (INTEL_GEN(dev_priv) >= 8)
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
		else
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT;

		/*
		 * Always set WM1S_LP_EN when spr_val != 0, even if the
		 * level is disabled. Doing otherwise could cause underruns.
		 */
		if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
			WARN_ON(wm_lp != 1);
			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
		} else
			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
	}

	/* LP0 register values */
	for_each_intel_crtc(dev, intel_crtc) {
		enum pipe pipe = intel_crtc->pipe;
		const struct intel_wm_level *r =
			&intel_crtc->wm.active.ilk.wm[0];

		if (WARN_ON(!r->enable))
			continue;

		results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;

		results->wm_pipe[pipe] =
			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
			r->cur_val;
	}
}

/* Find the result with the highest level enabled. Check for enable_fbc_wm in
 * case both are at the same level. Prefer r1 in case they're the same. */
static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
						  struct intel_pipe_wm *r1,
						  struct intel_pipe_wm *r2)
{
	int level, max_level = ilk_wm_max_level(to_i915(dev));
	int level1 = 0, level2 = 0;

	for (level = 1; level <= max_level; level++) {
		if (r1->wm[level].enable)
			level1 = level;
		if (r2->wm[level].enable)
			level2 = level;
	}

	if (level1 == level2) {
		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
			return r2;
		else
			return r1;
	} else if (level1 > level2) {
		return r1;
	} else {
		return r2;
	}
}

/* dirty bits used to track which watermarks need changes */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)
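/*
 * Illustrative encoding: pipes occupy bits 0-2, linetimes bits 8-10,
 * LP1-LP3 bits 16-18; e.g. a pipe B linetime change sets bit 9 and,
 * since LP1+ must be re-evaluated, all of bits 16-18 as well.
 */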

static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
					 const struct ilk_wm_values *old,
					 const struct ilk_wm_values *new)
{
	unsigned int dirty = 0;
	enum pipe pipe;
	int wm_lp;

	for_each_pipe(dev_priv, pipe) {
		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
			dirty |= WM_DIRTY_LINETIME(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}

		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
			dirty |= WM_DIRTY_PIPE(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}
	}

	if (old->enable_fbc_wm != new->enable_fbc_wm) {
		dirty |= WM_DIRTY_FBC;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	if (old->partitioning != new->partitioning) {
		dirty |= WM_DIRTY_DDB;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	/* LP1+ watermarks already deemed dirty, no need to continue */
	if (dirty & WM_DIRTY_LP_ALL)
		return dirty;

	/* Find the lowest numbered LP1+ watermark in need of an update... */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}

	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
	for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
}

static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
			       unsigned int dirty)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	bool changed = false;

	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
		changed = true;
	}

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */

	return changed;
}

/*
 * The spec says we shouldn't write when we don't need to, because every write
 * causes WMs to be re-evaluated, expending some power.
 */
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
				struct ilk_wm_values *results)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	unsigned int dirty;
	uint32_t val;

	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
	if (!dirty)
		return;

	_ilk_disable_lp_wm(dev_priv, dirty);

	if (dirty & WM_DIRTY_PIPE(PIPE_A))
		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
	if (dirty & WM_DIRTY_PIPE(PIPE_B))
		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
	if (dirty & WM_DIRTY_PIPE(PIPE_C))
		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);

	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);

	if (dirty & WM_DIRTY_DDB) {
		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
			val = I915_READ(WM_MISC);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~WM_MISC_DATA_PARTITION_5_6;
			else
				val |= WM_MISC_DATA_PARTITION_5_6;
			I915_WRITE(WM_MISC, val);
		} else {
			val = I915_READ(DISP_ARB_CTL2);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~DISP_DATA_PARTITION_5_6;
			else
				val |= DISP_DATA_PARTITION_5_6;
			I915_WRITE(DISP_ARB_CTL2, val);
		}
	}

	if (dirty & WM_DIRTY_FBC) {
		val = I915_READ(DISP_ARB_CTL);
		if (results->enable_fbc_wm)
			val &= ~DISP_FBC_WM_DIS;
		else
			val |= DISP_FBC_WM_DIS;
		I915_WRITE(DISP_ARB_CTL, val);
	}

	if (dirty & WM_DIRTY_LP(1) &&
	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);

	if (INTEL_GEN(dev_priv) >= 7) {
		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
	}

	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);

	dev_priv->wm.hw = *results;
}

bool ilk_disable_lp_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}

static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
{
	u8 enabled_slices;

	/* Slice 1 will always be enabled */
	enabled_slices = 1;

	/* Gens prior to GEN11 have only one DBuf slice */
	if (INTEL_GEN(dev_priv) < 11)
		return enabled_slices;

	if (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
		enabled_slices++;

	return enabled_slices;
}

/*
 * FIXME: We still don't have the proper code to detect if we need to apply the WA,
 * so assume we'll always need it in order to avoid underruns.
 */
static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv))
		return true;

	return false;
}

static bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
	    IS_CANNONLAKE(dev_priv))
		return true;

	if (IS_SKYLAKE(dev_priv) &&
	    dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
		return true;

	return false;
}

/*
 * SAGV dynamically adjusts the system agent voltage and clock frequencies
 * depending on power and performance requirements. The display engine access
 * to system memory is blocked during the adjustment time. Because of the
 * blocking time, having this enabled can cause full system hangs and/or pipe
 * underruns if we don't meet all of the following requirements:
 *
 *  - <= 1 pipe enabled
 *  - All planes can enable watermarks for latencies >= SAGV engine block time
 *  - We're not using an interlaced display configuration
 */
int
intel_enable_sagv(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!intel_has_sagv(dev_priv))
		return 0;

	if (dev_priv->sagv_status == I915_SAGV_ENABLED)
		return 0;

	DRM_DEBUG_KMS("Enabling the SAGV\n");
	mutex_lock(&dev_priv->pcu_lock);

	ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
				      GEN9_SAGV_ENABLE);

	/* We don't need to wait for the SAGV when enabling */
	mutex_unlock(&dev_priv->pcu_lock);

	/*
	 * Some skl systems, pre-release machines in particular,
	 * don't actually have an SAGV.
	 */
	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
		return 0;
	} else if (ret < 0) {
		DRM_ERROR("Failed to enable the SAGV\n");
		return ret;
	}

	dev_priv->sagv_status = I915_SAGV_ENABLED;
	return 0;
}

int
intel_disable_sagv(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!intel_has_sagv(dev_priv))
		return 0;

	if (dev_priv->sagv_status == I915_SAGV_DISABLED)
		return 0;

	DRM_DEBUG_KMS("Disabling the SAGV\n");
	mutex_lock(&dev_priv->pcu_lock);

	/* bspec says to keep retrying for at least 1 ms */
	ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
				GEN9_SAGV_DISABLE,
				GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
				1);
	mutex_unlock(&dev_priv->pcu_lock);

	/*
	 * Some skl systems, pre-release machines in particular,
	 * don't actually have an SAGV.
	 */
	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
		return 0;
	} else if (ret < 0) {
		DRM_ERROR("Failed to disable the SAGV (%d)\n", ret);
		return ret;
	}

	dev_priv->sagv_status = I915_SAGV_DISABLED;
	return 0;
}

bool intel_can_enable_sagv(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct intel_crtc *crtc;
	struct intel_plane *plane;
	struct intel_crtc_state *cstate;
	enum pipe pipe;
	int level, latency;
	int sagv_block_time_us;

	if (!intel_has_sagv(dev_priv))
		return false;

	if (IS_GEN9(dev_priv))
		sagv_block_time_us = 30;
	else if (IS_GEN10(dev_priv))
		sagv_block_time_us = 20;
	else
		sagv_block_time_us = 10;
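	/* SAGV engine block times per gen: GEN9 30 us, GEN10 20 us, GEN11+ 10 us */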

	/*
	 * SKL+ workaround: bspec recommends we disable the SAGV when we have
	 * more than one pipe enabled
	 *
	 * If there are no active CRTCs, no additional checks need be performed
	 */
	if (hweight32(intel_state->active_crtcs) == 0)
		return true;
	else if (hweight32(intel_state->active_crtcs) > 1)
		return false;

	/* Since we're now guaranteed to only have one active CRTC... */
	pipe = ffs(intel_state->active_crtcs) - 1;
	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	cstate = to_intel_crtc_state(crtc->base.state);

	if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		return false;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct skl_plane_wm *wm =
			&cstate->wm.skl.optimal.planes[plane->id];

		/* Skip this plane if it's not enabled */
		if (!wm->wm[0].plane_en)
			continue;

		/* Find the highest enabled wm level for this plane */
		for (level = ilk_wm_max_level(dev_priv);
		     !wm->wm[level].plane_en; --level)
		     { }

		latency = dev_priv->wm.skl_latency[level];

		if (skl_needs_memory_bw_wa(intel_state) &&
		    plane->base.state->fb->modifier ==
		    I915_FORMAT_MOD_X_TILED)
			latency += 15;

		/*
		 * If any of the planes on this pipe don't enable wm levels that
		 * cover a memory latency of at least sagv_block_time_us, we
		 * can't enable SAGV.
		 */
		if (latency < sagv_block_time_us)
			return false;
	}

	return true;
}

static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
			      const struct intel_crtc_state *cstate,
			      const unsigned int total_data_rate,
			      const int num_active,
			      struct skl_ddb_allocation *ddb)
{
	const struct drm_display_mode *adjusted_mode;
	u64 total_data_bw;
	u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;

	WARN_ON(ddb_size == 0);

	if (INTEL_GEN(dev_priv) < 11)
		return ddb_size - 4; /* 4 blocks for bypass path allocation */

	adjusted_mode = &cstate->base.adjusted_mode;
	total_data_bw = (u64)total_data_rate * drm_mode_vrefresh(adjusted_mode);

	/*
	 * 12GB/s is the maximum BW supported by a single DBuf slice.
	 */
	if (total_data_bw >= GBps(12) || num_active > 1) {
		ddb->enabled_slices = 2;
	} else {
		ddb->enabled_slices = 1;
		ddb_size /= 2;
	}

	return ddb_size;
}

static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
				   const struct intel_crtc_state *cstate,
				   const unsigned int total_data_rate,
				   struct skl_ddb_allocation *ddb,
				   struct skl_ddb_entry *alloc, /* out */
				   int *num_active /* out */)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *for_crtc = cstate->base.crtc;
	const struct drm_crtc_state *crtc_state;
	const struct drm_crtc *crtc;
	u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
	enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
	u16 ddb_size;
	u32 i;

	if (WARN_ON(!state) || !cstate->base.active) {
		alloc->start = 0;
		alloc->end = 0;
		*num_active = hweight32(dev_priv->active_crtcs);
		return;
	}

	if (intel_state->active_pipe_changes)
		*num_active = hweight32(intel_state->active_crtcs);
	else
		*num_active = hweight32(dev_priv->active_crtcs);

	ddb_size = intel_get_ddb_size(dev_priv, cstate, total_data_rate,
				      *num_active, ddb);

	/*
	 * If the state doesn't change the active CRTCs or there is no
	 * modeset request, then there's no need to recalculate;
	 * the existing pipe allocation limits should remain unchanged.
	 * Note that we're safe from racing commits since any racing commit
	 * that changes the active CRTC list or does a modeset would need to
	 * grab _all_ crtc locks, including the one we currently hold.
	 */
	if (!intel_state->active_pipe_changes && !intel_state->modeset) {
		/*
		 * alloc may be cleared by clear_intel_crtc_state,
		 * copy from old state to be sure
		 */
		*alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb;
		return;
	}

	/*
	 * The watermark/ddb requirement depends strongly on the width of
	 * the framebuffer, so instead of allocating DDB equally among pipes,
	 * distribute it based on the resolution/width of each display.
	 */
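	/*
	 * Worked example (illustrative): ddb_size = 896 blocks and two
	 * enabled pipes 1920 and 1280 pixels wide give the first pipe
	 * [0, 896 * 1920 / 3200) = [0, 537) and the second [537, 896).
	 */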
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		const struct drm_display_mode *adjusted_mode;
		int hdisplay, vdisplay;
		enum pipe pipe;

		if (!crtc_state->enable)
			continue;

		pipe = to_intel_crtc(crtc)->pipe;
		adjusted_mode = &crtc_state->adjusted_mode;
		drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
		total_width += hdisplay;

		if (pipe < for_pipe)
			width_before_pipe += hdisplay;
		else if (pipe == for_pipe)
			pipe_width = hdisplay;
	}

	alloc->start = ddb_size * width_before_pipe / total_width;
	alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
}

static unsigned int skl_cursor_allocation(int num_active)
{
	if (num_active == 1)
		return 32;

	return 8;
}

static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
				       struct skl_ddb_entry *entry, u32 reg)
{
	u16 mask;

	if (INTEL_GEN(dev_priv) >= 11)
		mask = ICL_DDB_ENTRY_MASK;
	else
		mask = SKL_DDB_ENTRY_MASK;
	entry->start = reg & mask;
	entry->end = (reg >> DDB_ENTRY_END_SHIFT) & mask;
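	/*
	 * e.g. (illustrative, SKL 10-bit fields): reg 0x01ff0020 decodes to
	 * start = 0x20 and end = 0x1ff, converted to an exclusive end below.
	 */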

	if (entry->end)
		entry->end += 1;
}

static void
skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
			   const enum pipe pipe,
			   const enum plane_id plane_id,
			   struct skl_ddb_allocation *ddb /* out */)
{
	u32 val, val2 = 0;
	int fourcc, pixel_format;

	/* Cursor doesn't support NV12/planar, so no extra calculation needed */
	if (plane_id == PLANE_CURSOR) {
		val = I915_READ(CUR_BUF_CFG(pipe));
		skl_ddb_entry_init_from_hw(dev_priv,
					   &ddb->plane[pipe][plane_id], val);
		return;
	}

	val = I915_READ(PLANE_CTL(pipe, plane_id));

	/* No DDB allocated for disabled planes */
	if (!(val & PLANE_CTL_ENABLE))
		return;

	pixel_format = val & PLANE_CTL_FORMAT_MASK;
	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX,
				      val & PLANE_CTL_ALPHA_MASK);

	val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
	/*
	 * FIXME: add proper NV12 support for ICL. Avoid reading unclaimed
	 * registers for now.
	 */
	if (INTEL_GEN(dev_priv) < 11)
		val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));

	if (fourcc == DRM_FORMAT_NV12) {
		skl_ddb_entry_init_from_hw(dev_priv,
					   &ddb->plane[pipe][plane_id], val2);
		skl_ddb_entry_init_from_hw(dev_priv,
					   &ddb->uv_plane[pipe][plane_id], val);
	} else {
		skl_ddb_entry_init_from_hw(dev_priv,
					   &ddb->plane[pipe][plane_id], val);
	}
}

void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
			  struct skl_ddb_allocation *ddb /* out */)
{
	struct intel_crtc *crtc;

	memset(ddb, 0, sizeof(*ddb));

	ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		enum intel_display_power_domain power_domain;
		enum plane_id plane_id;
		enum pipe pipe = crtc->pipe;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;

		for_each_plane_id_on_crtc(crtc, plane_id)
			skl_ddb_get_hw_plane_state(dev_priv, pipe,
						   plane_id, ddb);

		intel_display_power_put(dev_priv, power_domain);
	}
}

/*
 * Determines the downscale amount of a plane for the purposes of watermark calculations.
 * The bspec defines downscale amount as:
 *
 * """
 * Horizontal down scale amount = maximum[1, Horizontal source size /
 *                                           Horizontal destination size]
 * Vertical down scale amount = maximum[1, Vertical source size /
 *                                         Vertical destination size]
 * Total down scale amount = Horizontal down scale amount *
 *                           Vertical down scale amount
 * """
 *
 * Return value is provided in 16.16 fixed point form to retain fractional part.
 * Caller should take care of dividing & rounding off the value.
 */
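/*
 * Worked example (illustrative): a 3840x2160 source scanned out at
 * 1920x1080 gives 2.0 * 2.0 = 4.0 total downscale, i.e. 0x40000 in
 * 16.16 fixed point.
 */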
static uint_fixed_16_16_t
skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
			   const struct intel_plane_state *pstate)
{
	struct intel_plane *plane = to_intel_plane(pstate->base.plane);
	uint32_t src_w, src_h, dst_w, dst_h;
	uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
	uint_fixed_16_16_t downscale_h, downscale_w;

	if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
		return u32_to_fixed16(0);

	/* n.b., src is 16.16 fixed point, dst is whole integer */
	if (plane->id == PLANE_CURSOR) {
		/*
		 * Cursors only support 0/180 degree rotation,
		 * hence no need to account for rotation here.
		 */
		src_w = pstate->base.src_w >> 16;
		src_h = pstate->base.src_h >> 16;
		dst_w = pstate->base.crtc_w;
		dst_h = pstate->base.crtc_h;
	} else {
		/*
		 * Src coordinates are already rotated by 270 degrees for
		 * the 90/270 degree plane rotation cases (to match the
		 * GTT mapping), hence no need to account for rotation here.
		 */
		src_w = drm_rect_width(&pstate->base.src) >> 16;
		src_h = drm_rect_height(&pstate->base.src) >> 16;
		dst_w = drm_rect_width(&pstate->base.dst);
		dst_h = drm_rect_height(&pstate->base.dst);
	}

	fp_w_ratio = div_fixed16(src_w, dst_w);
	fp_h_ratio = div_fixed16(src_h, dst_h);
	downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
	downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));

	return mul_fixed16(downscale_w, downscale_h);
}

static uint_fixed_16_16_t
skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
{
	uint_fixed_16_16_t pipe_downscale = u32_to_fixed16(1);

	if (!crtc_state->base.enable)
		return pipe_downscale;

	if (crtc_state->pch_pfit.enabled) {
		uint32_t src_w, src_h, dst_w, dst_h;
		uint32_t pfit_size = crtc_state->pch_pfit.size;
		uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
		uint_fixed_16_16_t downscale_h, downscale_w;

		src_w = crtc_state->pipe_src_w;
		src_h = crtc_state->pipe_src_h;
		dst_w = pfit_size >> 16;
		dst_h = pfit_size & 0xffff;

		if (!dst_w || !dst_h)
			return pipe_downscale;

		fp_w_ratio = div_fixed16(src_w, dst_w);
		fp_h_ratio = div_fixed16(src_h, dst_h);
		downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
		downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));

		pipe_downscale = mul_fixed16(downscale_w, downscale_h);
	}

	return pipe_downscale;
}

int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *cstate)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct drm_crtc_state *crtc_state = &cstate->base;
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_plane *plane;
	const struct drm_plane_state *pstate;
	struct intel_plane_state *intel_pstate;
	int crtc_clock, dotclk;
	uint32_t pipe_max_pixel_rate;
	uint_fixed_16_16_t pipe_downscale;
	uint_fixed_16_16_t max_downscale = u32_to_fixed16(1);

	if (!cstate->base.enable)
		return 0;

	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
		uint_fixed_16_16_t plane_downscale;
		uint_fixed_16_16_t fp_9_div_8 = div_fixed16(9, 8);
		int bpp;

		if (!intel_wm_plane_visible(cstate,
					    to_intel_plane_state(pstate)))
			continue;

		if (WARN_ON(!pstate->fb))
			return -EINVAL;

		intel_pstate = to_intel_plane_state(pstate);
		plane_downscale = skl_plane_downscale_amount(cstate,
							     intel_pstate);
		bpp = pstate->fb->format->cpp[0] * 8;
		if (bpp == 64)
			plane_downscale = mul_fixed16(plane_downscale,
						      fp_9_div_8);

		max_downscale = max_fixed16(plane_downscale, max_downscale);
	}
	pipe_downscale = skl_pipe_downscale_amount(cstate);

	pipe_downscale = mul_fixed16(pipe_downscale, max_downscale);

	crtc_clock = crtc_state->adjusted_mode.crtc_clock;
	dotclk = to_intel_atomic_state(state)->cdclk.logical.cdclk;

	if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
		dotclk *= 2;

	pipe_max_pixel_rate = div_round_up_u32_fixed16(dotclk, pipe_downscale);

	if (pipe_max_pixel_rate < crtc_clock) {
		DRM_DEBUG_KMS("Max supported pixel clock with scaling exceeded\n");
		return -EINVAL;
	}

	return 0;
}

static unsigned int
skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
			     const struct drm_plane_state *pstate,
			     const int plane)
{
	struct intel_plane *intel_plane = to_intel_plane(pstate->plane);
	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
	uint32_t data_rate;
	uint32_t width = 0, height = 0;
	struct drm_framebuffer *fb;
	u32 format;
	uint_fixed_16_16_t down_scale_amount;

	if (!intel_pstate->base.visible)
		return 0;

	fb = pstate->fb;
	format = fb->format->format;

	if (intel_plane->id == PLANE_CURSOR)
		return 0;
	if (plane == 1 && format != DRM_FORMAT_NV12)
		return 0;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	width = drm_rect_width(&intel_pstate->base.src) >> 16;
	height = drm_rect_height(&intel_pstate->base.src) >> 16;

	/* UV plane does 1/2 pixel sub-sampling */
	if (plane == 1 && format == DRM_FORMAT_NV12) {
		width /= 2;
		height /= 2;
	}

	data_rate = width * height * fb->format->cpp[plane];

	down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);

	return mul_round_up_u32_fixed16(data_rate, down_scale_amount);
}

/*
 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
 * an 8192x4096@32bpp framebuffer:
 *   3 * 4096 * 8192  * 4 < 2^32
 */
static unsigned int
skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
				 unsigned int *plane_data_rate,
				 unsigned int *uv_plane_data_rate)
{
	struct drm_crtc_state *cstate = &intel_cstate->base;
	struct drm_atomic_state *state = cstate->state;
	struct drm_plane *plane;
	const struct drm_plane_state *pstate;
	unsigned int total_data_rate = 0;

	if (WARN_ON(!state))
		return 0;

	/* Calculate and cache data rate for each plane */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
		enum plane_id plane_id = to_intel_plane(plane)->id;
		unsigned int rate;

		/* packed/y */
		rate = skl_plane_relative_data_rate(intel_cstate,
						    pstate, 0);
		plane_data_rate[plane_id] = rate;

		total_data_rate += rate;

		/* uv-plane */
		rate = skl_plane_relative_data_rate(intel_cstate,
						    pstate, 1);
		uv_plane_data_rate[plane_id] = rate;

		total_data_rate += rate;
	}

	return total_data_rate;
}

static uint16_t
skl_ddb_min_alloc(const struct drm_plane_state *pstate, const int plane)
{
	struct drm_framebuffer *fb = pstate->fb;
	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
	uint32_t src_w, src_h;
	uint32_t min_scanlines = 8;
	uint8_t plane_bpp;

	if (WARN_ON(!fb))
		return 0;

	/* For the uv-plane of packed (non-NV12) formats, return 0 */
	if (plane == 1 && fb->format->format != DRM_FORMAT_NV12)
		return 0;

	/* For Non Y-tile return 8-blocks */
	if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
	    fb->modifier != I915_FORMAT_MOD_Yf_TILED &&
	    fb->modifier != I915_FORMAT_MOD_Y_TILED_CCS &&
	    fb->modifier != I915_FORMAT_MOD_Yf_TILED_CCS)
		return 8;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
	src_h = drm_rect_height(&intel_pstate->base.src) >> 16;

	/* Halve UV plane width and height for NV12 */
	if (plane == 1) {
		src_w /= 2;
		src_h /= 2;
	}

	plane_bpp = fb->format->cpp[plane];

	if (drm_rotation_90_or_270(pstate->rotation)) {
		switch (plane_bpp) {
		case 1:
			min_scanlines = 32;
			break;
		case 2:
			min_scanlines = 16;
			break;
		case 4:
			min_scanlines = 8;
			break;
		case 8:
			min_scanlines = 4;
			break;
		default:
			WARN(1, "Unsupported pixel depth %u for rotation",
			     plane_bpp);
			min_scanlines = 32;
		}
	}

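	/*
	 * e.g. (illustrative): Y-tiled, src_w = 1920, 4 Bpp, no rotation:
	 * DIV_ROUND_UP(4 * 1920 * 4, 512) = 60, * 8 / 4 + 3 = 123 blocks.
	 */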
	return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
}

static void
skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
		 uint16_t *minimum, uint16_t *uv_minimum)
{
	const struct drm_plane_state *pstate;
	struct drm_plane *plane;

	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
		enum plane_id plane_id = to_intel_plane(plane)->id;

		if (plane_id == PLANE_CURSOR)
			continue;

		if (!pstate->visible)
			continue;

		minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
		uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
	}

	minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
}

static int
skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
		      struct skl_ddb_allocation *ddb /* out */)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct drm_crtc *crtc = cstate->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
	uint16_t alloc_size, start;
	uint16_t minimum[I915_MAX_PLANES] = {};
	uint16_t uv_minimum[I915_MAX_PLANES] = {};
	unsigned int total_data_rate;
	enum plane_id plane_id;
	int num_active;
	unsigned int plane_data_rate[I915_MAX_PLANES] = {};
	unsigned int uv_plane_data_rate[I915_MAX_PLANES] = {};
	uint16_t total_min_blocks = 0;

	/* Clear the partitioning for disabled planes. */
	memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
	memset(ddb->uv_plane[pipe], 0, sizeof(ddb->uv_plane[pipe]));

	if (WARN_ON(!state))
		return 0;

	if (!cstate->base.active) {
		alloc->start = alloc->end = 0;
		return 0;
	}

	total_data_rate = skl_get_total_relative_data_rate(cstate,
							   plane_data_rate,
							   uv_plane_data_rate);
	skl_ddb_get_pipe_allocation_limits(dev, cstate, total_data_rate, ddb,
					   alloc, &num_active);
	alloc_size = skl_ddb_entry_size(alloc);
	if (alloc_size == 0)
		return 0;

	skl_ddb_calc_min(cstate, num_active, minimum, uv_minimum);

	/*
	 * 1. Allocate the minimum required blocks for each active plane
	 * and allocate the cursor; it doesn't require extra allocation
	 * proportional to the data rate.
	 */

	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
		total_min_blocks += minimum[plane_id];
		total_min_blocks += uv_minimum[plane_id];
	}

	if (total_min_blocks > alloc_size) {
		DRM_DEBUG_KMS("Requested display configuration exceeds system DDB limitations");
		DRM_DEBUG_KMS("minimum required %d/%d\n", total_min_blocks,
							alloc_size);
		return -EINVAL;
	}

	alloc_size -= total_min_blocks;
	ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
	ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;

	/*
	 * 2. Distribute the remaining space in proportion to the amount of
	 * data each plane needs to fetch from memory.
	 *
	 * FIXME: we may not allocate every single block here.
	 */
	if (total_data_rate == 0)
		return 0;

	start = alloc->start;
	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
		unsigned int data_rate, uv_data_rate;
		uint16_t plane_blocks, uv_plane_blocks;

		if (plane_id == PLANE_CURSOR)
			continue;

		data_rate = plane_data_rate[plane_id];

		/*
		 * allocation for (packed formats) or (uv-plane part of planar format):
		 * promote the expression to 64 bits to avoid overflowing, the
		 * result is < available as data_rate / total_data_rate < 1
		 */
		plane_blocks = minimum[plane_id];
		plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
					total_data_rate);
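		/*
		 * e.g. (illustrative): with alloc_size = 800 blocks, a plane
		 * carrying 25% of the pipe's data rate ends up with
		 * minimum[plane_id] + 200 blocks.
		 */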

		/* Leave disabled planes at (0,0) */
		if (data_rate) {
			ddb->plane[pipe][plane_id].start = start;
			ddb->plane[pipe][plane_id].end = start + plane_blocks;
		}

		start += plane_blocks;

		/* Allocate DDB for UV plane for planar format/NV12 */
		uv_data_rate = uv_plane_data_rate[plane_id];

		uv_plane_blocks = uv_minimum[plane_id];
		uv_plane_blocks += div_u64((uint64_t)alloc_size * uv_data_rate,
					   total_data_rate);

		if (uv_data_rate) {
			ddb->uv_plane[pipe][plane_id].start = start;
			ddb->uv_plane[pipe][plane_id].end =
				start + uv_plane_blocks;
		}

		start += uv_plane_blocks;
	}

	return 0;
}

/*
 * The max latency should be 257 (the maximum the punit can encode is 255
 * and we add 2us for the read latency) and cpp should always be <= 8, so
 * that should allow pixel_rate up to ~2 GHz, which seems sufficient since
 * the max 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
 */
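/*
 * Worked example (illustrative): pixel_rate = 300000 kHz, cpp = 4,
 * latency = 15 us and dbuf_block_size = 512 give
 * 15 * 300000 * 4 / (1000 * 512) = ~35.2 blocks in 16.16 fixed point.
 */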
static uint_fixed_16_16_t
skl_wm_method1(const struct drm_i915_private *dev_priv, uint32_t pixel_rate,
	       uint8_t cpp, uint32_t latency, uint32_t dbuf_block_size)
{
	uint32_t wm_intermediate_val;
	uint_fixed_16_16_t ret;

	if (latency == 0)
		return FP_16_16_MAX;

	wm_intermediate_val = latency * pixel_rate * cpp;
	ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);

	if (INTEL_GEN(dev_priv) >= 10)
		ret = add_fixed16_u32(ret, 1);

	return ret;
}

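/*
 * Method 2 (sketch): latency is first converted to whole lines
 * (latency * pixel_rate / (htotal * 1000), rounded up) and then
 * multiplied by the plane's blocks-per-line, in 16.16 fixed point.
 */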
static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
			uint32_t pipe_htotal,
			uint32_t latency,
			uint_fixed_16_16_t plane_blocks_per_line)
{
	uint32_t wm_intermediate_val;
	uint_fixed_16_16_t ret;

	if (latency == 0)
		return FP_16_16_MAX;

	wm_intermediate_val = latency * pixel_rate;
	wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
					   pipe_htotal * 1000);
	ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
	return ret;
}

static uint_fixed_16_16_t
intel_get_linetime_us(struct intel_crtc_state *cstate)
{
	uint32_t pixel_rate;
	uint32_t crtc_htotal;
	uint_fixed_16_16_t linetime_us;

	if (!cstate->base.active)
		return u32_to_fixed16(0);

	pixel_rate = cstate->pixel_rate;

	if (WARN_ON(pixel_rate == 0))
		return u32_to_fixed16(0);

	crtc_htotal = cstate->base.adjusted_mode.crtc_htotal;
	linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
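	/* e.g. (illustrative): htotal = 2200, 148500 kHz -> ~14.8 us */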

	return linetime_us;
}

static uint32_t
skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
			      const struct intel_plane_state *pstate)
{
	uint64_t adjusted_pixel_rate;
	uint_fixed_16_16_t downscale_amount;

	/* Shouldn't reach here on disabled planes... */
	if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
		return 0;

	/*
	 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
	 * with additional adjustments for plane-specific scaling.
	 */
	adjusted_pixel_rate = cstate->pixel_rate;
	downscale_amount = skl_plane_downscale_amount(cstate, pstate);

	return mul_round_up_u32_fixed16(adjusted_pixel_rate,
					    downscale_amount);
}

static int
skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
			    struct intel_crtc_state *cstate,
			    const struct intel_plane_state *intel_pstate,
			    struct skl_wm_params *wp, int plane_id)
{
	struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
	const struct drm_plane_state *pstate = &intel_pstate->base;
	const struct drm_framebuffer *fb = pstate->fb;
	uint32_t interm_pbpl;
	struct intel_atomic_state *state =
		to_intel_atomic_state(cstate->base.state);
	bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);

	if (!intel_wm_plane_visible(cstate, intel_pstate))
		return 0;

	/* only the NV12 format has two planes */
	if (plane_id == 1 && fb->format->format != DRM_FORMAT_NV12) {
		DRM_DEBUG_KMS("Non-NV12 formats have a single plane\n");
		return -EINVAL;
	}

	wp->y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED ||
		      fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
		      fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
		      fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
	wp->x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;
	wp->rc_surface = fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
			 fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
	wp->is_planar = fb->format->format == DRM_FORMAT_NV12;

	if (plane->id == PLANE_CURSOR) {
		wp->width = intel_pstate->base.crtc_w;
	} else {
		/*
		 * Src coordinates are already rotated by 270 degrees for
		 * the 90/270 degree plane rotation cases (to match the
		 * GTT mapping), hence no need to account for rotation here.
		 */
		wp->width = drm_rect_width(&intel_pstate->base.src) >> 16;
	}

	if (plane_id == 1 && wp->is_planar)
		wp->width /= 2;

	wp->cpp = fb->format->cpp[plane_id];
	wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate,
							     intel_pstate);

	if (INTEL_GEN(dev_priv) >= 11 &&
	    fb->modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 8)
		wp->dbuf_block_size = 256;
	else
		wp->dbuf_block_size = 512;

	if (drm_rotation_90_or_270(pstate->rotation)) {

		switch (wp->cpp) {
		case 1:
			wp->y_min_scanlines = 16;
			break;
		case 2:
			wp->y_min_scanlines = 8;
			break;
		case 4:
			wp->y_min_scanlines = 4;
			break;
		default:
			MISSING_CASE(wp->cpp);
			return -EINVAL;
		}
	} else {
		wp->y_min_scanlines = 4;
	}

	if (apply_memory_bw_wa)
		wp->y_min_scanlines *= 2;

	wp->plane_bytes_per_line = wp->width * wp->cpp;
	if (wp->y_tiled) {
		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
					   wp->y_min_scanlines,
					   wp->dbuf_block_size);

		if (INTEL_GEN(dev_priv) >= 10)
			interm_pbpl++;

		wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
							wp->y_min_scanlines);
	} else if (wp->x_tiled && IS_GEN9(dev_priv)) {
		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
					   wp->dbuf_block_size);
		wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
	} else {
		interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
					   wp->dbuf_block_size) + 1;
		wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
	}

	wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
					     wp->plane_blocks_per_line);
	wp->linetime_us = fixed16_to_u32_round_up(
					intel_get_linetime_us(cstate));

	return 0;
}

static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
				struct intel_crtc_state *cstate,
				const struct intel_plane_state *intel_pstate,
				uint16_t ddb_allocation,
				int level,
				const struct skl_wm_params *wp,
				const struct skl_wm_level *result_prev,
				struct skl_wm_level *result /* out */)
{
	const struct drm_plane_state *pstate = &intel_pstate->base;
	uint32_t latency = dev_priv->wm.skl_latency[level];
	uint_fixed_16_16_t method1, method2;
	uint_fixed_16_16_t selected_result;
	uint32_t res_blocks, res_lines;
	struct intel_atomic_state *state =
		to_intel_atomic_state(cstate->base.state);
	bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
	uint32_t min_disp_buf_needed;

	if (latency == 0 ||
	    !intel_wm_plane_visible(cstate, intel_pstate)) {
		result->plane_en = false;
		return 0;
	}

	/* Display WA #1141: kbl,cfl */
	if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
	    IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0)) &&
	    dev_priv->ipc_enabled)
		latency += 4;

	if (apply_memory_bw_wa && wp->x_tiled)
		latency += 15;

	method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
				 wp->cpp, latency, wp->dbuf_block_size);
	method2 = skl_wm_method2(wp->plane_pixel_rate,
				 cstate->base.adjusted_mode.crtc_htotal,
				 latency,
				 wp->plane_blocks_per_line);

	if (wp->y_tiled) {
		selected_result = max_fixed16(method2, wp->y_tile_minimum);
	} else {
		if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal /
		     wp->dbuf_block_size < 1) &&
		     (wp->plane_bytes_per_line / wp->dbuf_block_size < 1))
			selected_result = method2;
		else if (ddb_allocation >=
			 fixed16_to_u32_round_up(wp->plane_blocks_per_line))
			selected_result = min_fixed16(method1, method2);
		else if (latency >= wp->linetime_us)
			selected_result = min_fixed16(method1, method2);
		else
			selected_result = method1;
	}

	res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
	res_lines = div_round_up_fixed16(selected_result,
					 wp->plane_blocks_per_line);

	/* Display WA #1125: skl,bxt,kbl,glk */
	if (level == 0 && wp->rc_surface)
		res_blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);

	/* Display WA #1126: skl,bxt,kbl,glk */
	if (level >= 1 && level <= 7) {
		if (wp->y_tiled) {
			res_blocks += fixed16_to_u32_round_up(
							wp->y_tile_minimum);
			res_lines += wp->y_min_scanlines;
		} else {
			res_blocks++;
		}

		/*
		 * Make sure the result blocks for higher latency levels are
		 * at least as high as the level below the current one.
		 * Assumption in DDB algorithm optimization for special cases.
		 * Also covers Display WA #1125 for RC.
		 */
		if (result_prev->plane_res_b > res_blocks)
			res_blocks = result_prev->plane_res_b;
	}

	if (INTEL_GEN(dev_priv) >= 11) {
		if (wp->y_tiled) {
			uint32_t extra_lines;
			uint_fixed_16_16_t fp_min_disp_buf_needed;

			if (res_lines % wp->y_min_scanlines == 0)
				extra_lines = wp->y_min_scanlines;
			else
				extra_lines = wp->y_min_scanlines * 2 -
					      res_lines % wp->y_min_scanlines;

			fp_min_disp_buf_needed = mul_u32_fixed16(res_lines +
						extra_lines,
						wp->plane_blocks_per_line);
			min_disp_buf_needed = fixed16_to_u32_round_up(
						fp_min_disp_buf_needed);
		} else {
			min_disp_buf_needed = DIV_ROUND_UP(res_blocks * 11, 10);
		}
	} else {
		min_disp_buf_needed = res_blocks;
	}

	if ((level > 0 && res_lines > 31) ||
	    res_blocks >= ddb_allocation ||
	    min_disp_buf_needed >= ddb_allocation) {
		result->plane_en = false;

		/*
		 * If there are no valid level 0 watermarks, then we can't
		 * support this display configuration.
		 */
		if (level) {
			return 0;
		} else {
			struct drm_plane *plane = pstate->plane;

			DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
			DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n",
				      plane->base.id, plane->name,
				      res_blocks, ddb_allocation, res_lines);
			return -EINVAL;
		}
	}

	/*
	 * Display WA #826 (SKL:ALL, BXT:ALL) & #1059 (CNL:A)
	 * disable wm level 1-7 on NV12 planes
	 */
	if (wp->is_planar && level >= 1 &&
	    (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
	     IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))) {
		result->plane_en = false;
		return 0;
	}

	/* The number of lines is ignored for the level 0 watermark. */
	result->plane_res_b = res_blocks;
	result->plane_res_l = res_lines;
	result->plane_en = true;

	return 0;
}

static int
skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
		      struct skl_ddb_allocation *ddb,
		      struct intel_crtc_state *cstate,
		      const struct intel_plane_state *intel_pstate,
		      const struct skl_wm_params *wm_params,
		      struct skl_plane_wm *wm,
		      int plane_id)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct drm_plane *plane = intel_pstate->base.plane;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	uint16_t ddb_blocks;
	enum pipe pipe = intel_crtc->pipe;
	int level, max_level = ilk_wm_max_level(dev_priv);
	enum plane_id intel_plane_id = intel_plane->id;
	int ret;

	if (WARN_ON(!intel_pstate->base.fb))
		return -EINVAL;

	ddb_blocks = plane_id ?
		     skl_ddb_entry_size(&ddb->uv_plane[pipe][intel_plane_id]) :
		     skl_ddb_entry_size(&ddb->plane[pipe][intel_plane_id]);

	for (level = 0; level <= max_level; level++) {
		struct skl_wm_level *result = plane_id ? &wm->uv_wm[level] :
							  &wm->wm[level];
		struct skl_wm_level *result_prev;

		if (level)
			result_prev = plane_id ? &wm->uv_wm[level - 1] :
						  &wm->wm[level - 1];
		else
			result_prev = plane_id ? &wm->uv_wm[0] : &wm->wm[0];

		ret = skl_compute_plane_wm(dev_priv,
					   cstate,
					   intel_pstate,
					   ddb_blocks,
					   level,
					   wm_params,
					   result_prev,
					   result);
		if (ret)
			return ret;
	}

	if (intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
		wm->is_planar = true;

	return 0;
}

static uint32_t
skl_compute_linetime_wm(struct intel_crtc_state *cstate)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	uint_fixed_16_16_t linetime_us;
	uint32_t linetime_wm;

	linetime_us = intel_get_linetime_us(cstate);

	if (is_fixed16_zero(linetime_us))
		return 0;

	linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));

	/* Display WA #1135: bxt:ALL GLK:ALL */
	if ((IS_BROXTON(dev_priv) || IS_GEMINILAKE(dev_priv)) &&
	    dev_priv->ipc_enabled)
		linetime_wm /= 2;

	return linetime_wm;
}

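/*
 * Transition watermarks (sketch): on GEN10+ with IPC enabled, a fixed
 * headroom (trans_min + trans_amount blocks) is added on top of the
 * level 0 blocks, with 2 * y_tile_minimum as the floor for Y-tiled
 * surfaces.
 */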
static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
				      struct skl_wm_params *wp,
				      struct skl_wm_level *wm_l0,
				      uint16_t ddb_allocation,
				      struct skl_wm_level *trans_wm /* out */)
{
	struct drm_device *dev = cstate->base.crtc->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	uint16_t trans_min, trans_y_tile_min;
	const uint16_t trans_amount = 10; /* This is a configurable amount */
	uint16_t trans_offset_b, res_blocks;

	if (!cstate->base.active)
		goto exit;

	/* Transition WMs are not recommended by the HW team for GEN9 */
	if (INTEL_GEN(dev_priv) <= 9)
		goto exit;

	/* Transition WMs don't make any sense if IPC is disabled */
	if (!dev_priv->ipc_enabled)
		goto exit;

	trans_min = 0;
	if (INTEL_GEN(dev_priv) >= 10)
		trans_min = 4;

	trans_offset_b = trans_min + trans_amount;

	if (wp->y_tiled) {
		trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2,
							wp->y_tile_minimum);
		res_blocks = max(wm_l0->plane_res_b, trans_y_tile_min) +
				trans_offset_b;
	} else {
		res_blocks = wm_l0->plane_res_b + trans_offset_b;

		/* WA BUG:1938466 add one block for non y-tile planes */
		if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
			res_blocks += 1;

	}

	res_blocks += 1;

	if (res_blocks < ddb_allocation) {
		trans_wm->plane_res_b = res_blocks;
		trans_wm->plane_en = true;
		return;
	}

exit:
	trans_wm->plane_en = false;
}

static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
			     struct skl_ddb_allocation *ddb,
			     struct skl_pipe_wm *pipe_wm)
{
	struct drm_device *dev = cstate->base.crtc->dev;
	struct drm_crtc_state *crtc_state = &cstate->base;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_plane *plane;
	const struct drm_plane_state *pstate;
	struct skl_plane_wm *wm;
	int ret;

	/*
	 * We'll only calculate watermarks for planes that are actually
	 * enabled, so make sure all other planes are set as disabled.
	 */
	memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));

	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
		const struct intel_plane_state *intel_pstate =
						to_intel_plane_state(pstate);
		enum plane_id plane_id = to_intel_plane(plane)->id;
		struct skl_wm_params wm_params;
		enum pipe pipe = to_intel_crtc(cstate->base.crtc)->pipe;
		uint16_t ddb_blocks;

		wm = &pipe_wm->planes[plane_id];
		ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);

		ret = skl_compute_plane_wm_params(dev_priv, cstate,
						  intel_pstate, &wm_params, 0);
		if (ret)
			return ret;

		ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
					    intel_pstate, &wm_params, wm, 0);
		if (ret)
			return ret;

		skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
					  ddb_blocks, &wm->trans_wm);

		/* uv plane watermarks must also be validated for NV12/Planar */
		if (wm_params.is_planar) {
			memset(&wm_params, 0, sizeof(struct skl_wm_params));
			wm->is_planar = true;

			ret = skl_compute_plane_wm_params(dev_priv, cstate,
							  intel_pstate,
							  &wm_params, 1);
			if (ret)
				return ret;

			ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
						    intel_pstate, &wm_params,
						    wm, 1);
			if (ret)
				return ret;
		}
	}

	pipe_wm->linetime = skl_compute_linetime_wm(cstate);

	return 0;
}

static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
				i915_reg_t reg,
				const struct skl_ddb_entry *entry)
{
	if (entry->end)
		I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
	else
		I915_WRITE(reg, 0);
}

static void skl_write_wm_level(struct drm_i915_private *dev_priv,
			       i915_reg_t reg,
			       const struct skl_wm_level *level)
{
	uint32_t val = 0;

	if (level->plane_en) {
		val |= PLANE_WM_EN;
		val |= level->plane_res_b;
		val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
	}

	I915_WRITE(reg, val);
}

static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
			       const struct skl_plane_wm *wm,
			       const struct skl_ddb_allocation *ddb,
			       enum plane_id plane_id)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	enum pipe pipe = intel_crtc->pipe;

	for (level = 0; level <= max_level; level++) {
		skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
				   &wm->wm[level]);
	}
	skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
			   &wm->trans_wm);

	skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
			    &ddb->plane[pipe][plane_id]);
	/* FIXME: add proper NV12 support for ICL. */
	if (INTEL_GEN(dev_priv) >= 11)
		return skl_ddb_entry_write(dev_priv,
					   PLANE_BUF_CFG(pipe, plane_id),
					   &ddb->plane[pipe][plane_id]);
	if (wm->is_planar) {
		skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
				    &ddb->uv_plane[pipe][plane_id]);
5018 5019
		skl_ddb_entry_write(dev_priv,
				    PLANE_NV12_BUF_CFG(pipe, plane_id),
5020 5021 5022 5023 5024 5025
				    &ddb->plane[pipe][plane_id]);
	} else {
		skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
				    &ddb->plane[pipe][plane_id]);
		I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0);
	}
5026 5027
}

static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
				const struct skl_plane_wm *wm,
				const struct skl_ddb_allocation *ddb)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int level, max_level = ilk_wm_max_level(dev_priv);
	enum pipe pipe = intel_crtc->pipe;

	for (level = 0; level <= max_level; level++) {
		skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
				   &wm->wm[level]);
	}
	skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);

	skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
			    &ddb->plane[pipe][PLANE_CURSOR]);
}

bool skl_wm_level_equals(const struct skl_wm_level *l1,
			 const struct skl_wm_level *l2)
{
	if (l1->plane_en != l2->plane_en)
		return false;

	/* If both planes aren't enabled, the rest shouldn't matter */
	if (!l1->plane_en)
		return true;

	return (l1->plane_res_l == l2->plane_res_l &&
		l1->plane_res_b == l2->plane_res_b);
}

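/*
 * DDB entries behave as half-open [start, end) ranges here: two
 * allocations collide when each one starts before the other ends.
 */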
static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
					   const struct skl_ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}

bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
				 const struct skl_ddb_entry **entries,
				 const struct skl_ddb_entry *ddb,
				 int ignore)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe != ignore && entries[pipe] &&
		    skl_ddb_entries_overlap(ddb, entries[pipe]))
			return true;
	}

	return false;
}

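/*
 * Recompute the watermarks for one pipe and report whether they differ
 * from the previous state, so unchanged pipes can skip the register
 * writes at commit time.
 */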
static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
			      const struct skl_pipe_wm *old_pipe_wm,
			      struct skl_pipe_wm *pipe_wm, /* out */
			      struct skl_ddb_allocation *ddb, /* out */
			      bool *changed /* out */)
{
	struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
	int ret;

	ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
	if (ret)
		return ret;

	if (!memcmp(old_pipe_wm, pipe_wm, sizeof(*pipe_wm)))
		*changed = false;
	else
		*changed = true;

	return 0;
}

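/*
 * Collect a bitmask of all CRTCs touched by this atomic state; used to
 * decide which pipes may need their DDB allocation recomputed.
 */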
static uint32_t
pipes_modified(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	uint32_t i, ret = 0;

	for_each_new_crtc_in_state(state, crtc, cstate, i)
		ret |= drm_crtc_mask(crtc);

	return ret;
}

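/*
 * Any plane whose DDB allocation changed must be pulled into the atomic
 * state so its registers are rewritten as part of this commit, even if
 * the plane itself was not modified by userspace.
 */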
static int
skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct drm_device *dev = state->dev;
	struct drm_crtc *crtc = cstate->base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
	struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
	struct drm_plane_state *plane_state;
	struct drm_plane *plane;
	enum pipe pipe = intel_crtc->pipe;

	drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
		enum plane_id plane_id = to_intel_plane(plane)->id;

		if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
					&new_ddb->plane[pipe][plane_id]) &&
		    skl_ddb_entry_equal(&cur_ddb->uv_plane[pipe][plane_id],
					&new_ddb->uv_plane[pipe][plane_id]))
			continue;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 0;
}

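/*
 * Start from the DDB allocation currently programmed in hardware and
 * recompute the per-pipe allocations for every CRTC in the state.
 */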
static int
skl_compute_ddb(struct drm_atomic_state *state)
{
	const struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
	struct intel_crtc *crtc;
	struct intel_crtc_state *cstate;
	int ret, i;

	memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));

	for_each_new_intel_crtc_in_state(intel_state, crtc, cstate, i) {
		ret = skl_allocate_pipe_ddb(cstate, ddb);
		if (ret)
			return ret;

		ret = skl_ddb_add_affected_planes(cstate);
		if (ret)
			return ret;
	}

	return 0;
}

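/* Log every per-plane DDB entry that this commit is about to move. */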
static void
skl_print_wm_changes(const struct drm_atomic_state *state)
{
	const struct drm_device *dev = state->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_atomic_state *intel_state =
		to_intel_atomic_state(state);
	const struct drm_crtc *crtc;
	const struct drm_crtc_state *cstate;
	const struct intel_plane *intel_plane;
	const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
	const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
	int i;

	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		enum pipe pipe = intel_crtc->pipe;

		for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
			enum plane_id plane_id = intel_plane->id;
			const struct skl_ddb_entry *old, *new;

			old = &old_ddb->plane[pipe][plane_id];
			new = &new_ddb->plane[pipe][plane_id];

			if (skl_ddb_entry_equal(old, new))
				continue;

			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
					 intel_plane->base.base.id,
					 intel_plane->base.name,
					 old->start, old->end,
					 new->start, new->end);
		}
	}
}

static int
skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
{
	struct drm_device *dev = state->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_crtc *crtc;
	const struct drm_crtc_state *cstate;
	struct intel_crtc *intel_crtc;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	uint32_t realloc_pipes = pipes_modified(state);
	int ret, i;

	/*
	 * When we distrust bios wm we always need to recompute to set the
	 * expected DDB allocations for each CRTC.
	 */
	if (dev_priv->wm.distrust_bios_wm)
		(*changed) = true;

	/*
	 * If this transaction isn't actually touching any CRTC's, don't
	 * bother with watermark calculation.  Note that if we pass this
	 * test, we're guaranteed to hold at least one CRTC state mutex,
	 * which means we can safely use values like dev_priv->active_crtcs
	 * since any racing commits that want to update them would need to
	 * hold _all_ CRTC state mutexes.
	 */
	for_each_new_crtc_in_state(state, crtc, cstate, i)
		(*changed) = true;

	if (!*changed)
		return 0;

	/*
	 * If this is our first atomic update following hardware readout,
	 * we can't trust the DDB that the BIOS programmed for us.  Let's
	 * pretend that all pipes switched active status so that we'll
	 * ensure a full DDB recompute.
	 */
	if (dev_priv->wm.distrust_bios_wm) {
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       state->acquire_ctx);
		if (ret)
			return ret;

		intel_state->active_pipe_changes = ~0;

		/*
		 * We usually only initialize intel_state->active_crtcs if
		 * we're doing a modeset; make sure this field is always
		 * initialized during the sanitization process that happens
		 * on the first commit too.
		 */
		if (!intel_state->modeset)
			intel_state->active_crtcs = dev_priv->active_crtcs;
	}

	/*
	 * If the modeset changes which CRTC's are active, we need to
	 * recompute the DDB allocation for *all* active pipes, even
	 * those that weren't otherwise being modified in any way by this
	 * atomic commit.  Due to the shrinking of the per-pipe allocations
	 * when new active CRTC's are added, it's possible for a pipe that
	 * we were already using and aren't changing at all here to suddenly
	 * become invalid if its DDB needs exceed its new allocation.
	 *
	 * Note that if we wind up doing a full DDB recompute, we can't let
	 * any other display updates race with this transaction, so we need
	 * to grab the lock on *all* CRTC's.
	 */
	if (intel_state->active_pipe_changes || intel_state->modeset) {
		realloc_pipes = ~0;
		intel_state->wm_results.dirty_pipes = ~0;
	}

	/*
	 * We're not recomputing for the pipes not included in the commit, so
	 * make sure we start with the current state.
	 */
	for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
		struct intel_crtc_state *cstate;

		cstate = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(cstate))
			return PTR_ERR(cstate);
	}

	return 0;
}

static int
skl_compute_wm(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct skl_ddb_values *results = &intel_state->wm_results;
	struct skl_pipe_wm *pipe_wm;
	bool changed = false;
	int ret, i;

	/* Clear all dirty flags */
	results->dirty_pipes = 0;

	ret = skl_ddb_add_affected_pipes(state, &changed);
	if (ret || !changed)
		return ret;

	ret = skl_compute_ddb(state);
	if (ret)
		return ret;

	/*
	 * Calculate WM's for all pipes that are part of this transaction.
	 * Note that the DDB allocation above may have added more CRTC's that
	 * weren't otherwise being modified (and set bits in dirty_pipes) if
	 * pipe allocations had to change.
	 *
	 * FIXME:  Now that we're doing this in the atomic check phase, we
	 * should allow skl_update_pipe_wm() to return failure in cases where
	 * no suitable watermark values can be found.
	 */
	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		struct intel_crtc_state *intel_cstate =
			to_intel_crtc_state(cstate);
		const struct skl_pipe_wm *old_pipe_wm =
			&to_intel_crtc_state(crtc->state)->wm.skl.optimal;

		pipe_wm = &intel_cstate->wm.skl.optimal;
		ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
					 &results->ddb, &changed);
		if (ret)
			return ret;

		if (changed)
			results->dirty_pipes |= drm_crtc_mask(crtc);

		if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
			/* This pipe's WM's did not change */
			continue;

		intel_cstate->update_wm_pre = true;
	}

	skl_print_wm_changes(state);

	return 0;
}

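/*
 * Commit-time hook: write the precomputed watermarks and DDB entries for
 * every plane (cursor included) on a dirty CRTC.
 */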
static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
				      struct intel_crtc_state *cstate)
{
	struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
	const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
	enum pipe pipe = crtc->pipe;
	enum plane_id plane_id;

	if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
		return;

	I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);

	for_each_plane_id_on_crtc(crtc, plane_id) {
		if (plane_id != PLANE_CURSOR)
			skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id],
					   ddb, plane_id);
		else
			skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id],
					    ddb);
	}
}

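/*
 * Initial watermark programming for a CRTC: write the new values if the
 * active state changed, then record the committed DDB entries as the
 * current hardware state.
 */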
static void skl_initial_wm(struct intel_atomic_state *state,
			   struct intel_crtc_state *cstate)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct skl_ddb_values *results = &state->wm_results;
	struct skl_ddb_values *hw_vals = &dev_priv->wm.skl_hw;
	enum pipe pipe = intel_crtc->pipe;

	if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
		return;

	mutex_lock(&dev_priv->wm.wm_mutex);

	if (cstate->base.active_changed)
		skl_atomic_update_crtc_wm(state, cstate);

	memcpy(hw_vals->ddb.uv_plane[pipe], results->ddb.uv_plane[pipe],
	       sizeof(hw_vals->ddb.uv_plane[pipe]));
	memcpy(hw_vals->ddb.plane[pipe], results->ddb.plane[pipe],
	       sizeof(hw_vals->ddb.plane[pipe]));

	mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void ilk_compute_wm_config(struct drm_device *dev,
				  struct intel_wm_config *config)
{
	struct intel_crtc *crtc;

	/* Compute the currently _active_ config */
	for_each_intel_crtc(dev, crtc) {
		const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;

		if (!wm->pipe_enabled)
			continue;

		config->sprites_enabled |= wm->sprites_enabled;
		config->sprites_scaled |= wm->sprites_scaled;
		config->num_pipes_active++;
	}
}

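/*
 * Merge the active pipes into LP1+ watermarks, try the 5/6 DDB split
 * where the hardware allows it, pick the best result and write it out.
 */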
static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
	struct ilk_wm_maximums max;
	struct intel_wm_config config = {};
	struct ilk_wm_values results = {};
	enum intel_ddb_partitioning partitioning;

	ilk_compute_wm_config(dev, &config);

	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);

	/* 5/6 split only in single pipe config on IVB+ */
	if (INTEL_GEN(dev_priv) >= 7 &&
	    config.num_pipes_active == 1 && config.sprites_enabled) {
		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);

		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
	} else {
		best_lp_wm = &lp_wm_1_2;
	}

	partitioning = (best_lp_wm == &lp_wm_1_2) ?
		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;

	ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);

	ilk_write_wm_values(dev_priv, &results);
}

static void ilk_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc_state *cstate)
{
	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
	ilk_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void ilk_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc_state *cstate)
{
	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	if (cstate->wm.need_postvbl_update) {
		intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
		ilk_program_watermarks(dev_priv);
	}
	mutex_unlock(&dev_priv->wm.wm_mutex);
}

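/* Inverse of skl_write_wm_level(): unpack a level from its register value. */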
static inline void skl_wm_level_from_reg_val(uint32_t val,
					     struct skl_wm_level *level)
{
	level->plane_en = val & PLANE_WM_EN;
	level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
	level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
		PLANE_WM_LINES_MASK;
}

void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
			      struct skl_pipe_wm *out)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	int level, max_level;
	enum plane_id plane_id;
	uint32_t val;

	max_level = ilk_wm_max_level(dev_priv);

	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
		struct skl_plane_wm *wm = &out->planes[plane_id];

		for (level = 0; level <= max_level; level++) {
			if (plane_id != PLANE_CURSOR)
				val = I915_READ(PLANE_WM(pipe, plane_id, level));
			else
				val = I915_READ(CUR_WM(pipe, level));

			skl_wm_level_from_reg_val(val, &wm->wm[level]);
		}

		if (plane_id != PLANE_CURSOR)
			val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
		else
			val = I915_READ(CUR_WM_TRANS(pipe));

		skl_wm_level_from_reg_val(val, &wm->trans_wm);
	}

	if (!intel_crtc->active)
		return;

	out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
}

void skl_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct skl_ddb_values *hw = &dev_priv->wm.skl_hw;
	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *cstate;

	skl_ddb_get_hw_state(dev_priv, ddb);
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		intel_crtc = to_intel_crtc(crtc);
		cstate = to_intel_crtc_state(crtc->state);

		skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);

		if (intel_crtc->active)
			hw->dirty_pipes |= drm_crtc_mask(crtc);
	}

	if (dev_priv->active_crtcs) {
		/* Fully recompute DDB on first atomic commit */
		dev_priv->wm.distrust_bios_wm = true;
	} else {
		/*
		 * Easy/common case; just sanitize DDB now if everything off
		 * Keep dbuf slice info intact
		 */
		memset(ddb->plane, 0, sizeof(ddb->plane));
		memset(ddb->uv_plane, 0, sizeof(ddb->uv_plane));
	}
}

static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
	struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
	enum pipe pipe = intel_crtc->pipe;
	static const i915_reg_t wm0_pipe_reg[] = {
		[PIPE_A] = WM0_PIPEA_ILK,
		[PIPE_B] = WM0_PIPEB_ILK,
		[PIPE_C] = WM0_PIPEC_IVB,
	};

	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	memset(active, 0, sizeof(*active));

	active->pipe_enabled = intel_crtc->active;

	if (active->pipe_enabled) {
		u32 tmp = hw->wm_pipe[pipe];

		/*
		 * For active pipes LP0 watermark is marked as
		 * enabled, and LP1+ watermarks as disabled since
		 * we can't really reverse compute them in case
		 * multiple pipes are active.
		 */
		active->wm[0].enable = true;
		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
		active->linetime = hw->wm_linetime[pipe];
	} else {
		int level, max_level = ilk_wm_max_level(dev_priv);

		/*
		 * For inactive pipes, all watermark levels
		 * should be marked as enabled but zeroed,
		 * which is what we'd compute them to.
		 */
		for (level = 0; level <= max_level; level++)
			active->wm[level].enable = true;
	}

	intel_crtc->wm.active.ilk = *active;
}

#define _FW_WM(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
#define _FW_WM_VLV(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)

static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
			       struct g4x_wm_values *wm)
{
	uint32_t tmp;

	tmp = I915_READ(DSPFW1);
	wm->sr.plane = _FW_WM(tmp, SR);
	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);

	tmp = I915_READ(DSPFW2);
	wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
	wm->sr.fbc = _FW_WM(tmp, FBC_SR);
	wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
	wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);

	tmp = I915_READ(DSPFW3);
	wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
	wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
	wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
}

static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
			       struct vlv_wm_values *wm)
{
	enum pipe pipe;
	uint32_t tmp;

	for_each_pipe(dev_priv, pipe) {
		tmp = I915_READ(VLV_DDL(pipe));

		wm->ddl[pipe].plane[PLANE_PRIMARY] =
			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_CURSOR] =
			(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_SPRITE0] =
			(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_SPRITE1] =
			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
	}

	tmp = I915_READ(DSPFW1);
	wm->sr.plane = _FW_WM(tmp, SR);
	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);

	tmp = I915_READ(DSPFW2);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);

	tmp = I915_READ(DSPFW3);
	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);

	if (IS_CHERRYVIEW(dev_priv)) {
		tmp = I915_READ(DSPFW7_CHV);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = I915_READ(DSPFW8_CHV);
		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);

		tmp = I915_READ(DSPFW9_CHV);
		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
		wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);

		tmp = I915_READ(DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
	} else {
		tmp = I915_READ(DSPFW7);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = I915_READ(DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
	}
}

#undef _FW_WM
#undef _FW_WM_VLV

void g4x_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct g4x_wm_values *wm = &dev_priv->wm.g4x;
	struct intel_crtc *crtc;

	g4x_read_wm_values(dev_priv, wm);

	wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct g4x_wm_state *active = &crtc->wm.active.g4x;
		struct g4x_pipe_wm *raw;
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;
		int level, max_level;

		active->cxsr = wm->cxsr;
		active->hpll_en = wm->hpll_en;
		active->fbc_en = wm->fbc_en;

		active->sr = wm->sr;
		active->hpll = wm->hpll;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			active->wm.plane[plane_id] =
				wm->pipe[pipe].plane[plane_id];
		}

		if (wm->cxsr && wm->hpll_en)
			max_level = G4X_WM_LEVEL_HPLL;
		else if (wm->cxsr)
			max_level = G4X_WM_LEVEL_SR;
		else
			max_level = G4X_WM_LEVEL_NORMAL;

		level = G4X_WM_LEVEL_NORMAL;
		raw = &crtc_state->wm.g4x.raw[level];
		for_each_plane_id_on_crtc(crtc, plane_id)
			raw->plane[plane_id] = active->wm.plane[plane_id];

		if (++level > max_level)
			goto out;

		raw = &crtc_state->wm.g4x.raw[level];
		raw->plane[PLANE_PRIMARY] = active->sr.plane;
		raw->plane[PLANE_CURSOR] = active->sr.cursor;
		raw->plane[PLANE_SPRITE0] = 0;
		raw->fbc = active->sr.fbc;

		if (++level > max_level)
			goto out;

		raw = &crtc_state->wm.g4x.raw[level];
		raw->plane[PLANE_PRIMARY] = active->hpll.plane;
		raw->plane[PLANE_CURSOR] = active->hpll.cursor;
		raw->plane[PLANE_SPRITE0] = 0;
		raw->fbc = active->hpll.fbc;

	out:
		for_each_plane_id_on_crtc(crtc, plane_id)
			g4x_raw_plane_wm_set(crtc_state, level,
					     plane_id, USHRT_MAX);
		g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

		crtc_state->wm.g4x.optimal = *active;
		crtc_state->wm.g4x.intermediate = *active;

		DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
			      pipe_name(pipe),
			      wm->pipe[pipe].plane[PLANE_PRIMARY],
			      wm->pipe[pipe].plane[PLANE_CURSOR],
			      wm->pipe[pipe].plane[PLANE_SPRITE0]);
	}

	DRM_DEBUG_KMS("Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
		      wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
	DRM_DEBUG_KMS("Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
		      wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
	DRM_DEBUG_KMS("Initial SR=%s HPLL=%s FBC=%s\n",
		      yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en));
}

void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	mutex_lock(&dev_priv->wm.wm_mutex);

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_crtc *crtc =
			intel_get_crtc_for_pipe(dev_priv, plane->pipe);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
		enum plane_id plane_id = plane->id;
		int level;

		if (plane_state->base.visible)
			continue;

		for (level = 0; level < 3; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.g4x.raw[level];

			raw->plane[plane_id] = 0;
			wm_state->wm.plane[plane_id] = 0;
		}

		if (plane_id == PLANE_PRIMARY) {
			for (level = 0; level < 3; level++) {
				struct g4x_pipe_wm *raw =
					&crtc_state->wm.g4x.raw[level];
				raw->fbc = 0;
			}

			wm_state->sr.fbc = 0;
			wm_state->hpll.fbc = 0;
			wm_state->fbc_en = false;
		}
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		crtc_state->wm.g4x.intermediate =
			crtc_state->wm.g4x.optimal;
		crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
	}

	g4x_program_watermarks(dev_priv);

	mutex_unlock(&dev_priv->wm.wm_mutex);
}

void vlv_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct vlv_wm_values *wm = &dev_priv->wm.vlv;
	struct intel_crtc *crtc;
	u32 val;

	vlv_read_wm_values(dev_priv, wm);

	wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
	wm->level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		mutex_lock(&dev_priv->pcu_lock);

		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
		if (val & DSP_MAXFIFO_PM5_ENABLE)
			wm->level = VLV_WM_LEVEL_PM5;

		/*
		 * If DDR DVFS is disabled in the BIOS, Punit
		 * will never ack the request. So if that happens
		 * assume we don't have to enable/disable DDR DVFS
		 * dynamically. To test that just set the REQ_ACK
		 * bit to poke the Punit, but don't change the
		 * HIGH/LOW bits so that we don't actually change
		 * the current state.
		 */
		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
		val |= FORCE_DDR_FREQ_REQ_ACK;
		vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
			DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
				      "assuming DDR DVFS is disabled\n");
			dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
		} else {
			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
				wm->level = VLV_WM_LEVEL_DDR_DVFS;
		}

		mutex_unlock(&dev_priv->pcu_lock);
	}

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct vlv_wm_state *active = &crtc->wm.active.vlv;
		const struct vlv_fifo_state *fifo_state =
			&crtc_state->wm.vlv.fifo_state;
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;
		int level;

		vlv_get_fifo_size(crtc_state);

		active->num_levels = wm->level + 1;
		active->cxsr = wm->cxsr;

		for (level = 0; level < active->num_levels; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.vlv.raw[level];

			active->sr[level].plane = wm->sr.plane;
			active->sr[level].cursor = wm->sr.cursor;

			for_each_plane_id_on_crtc(crtc, plane_id) {
				active->wm[level].plane[plane_id] =
					wm->pipe[pipe].plane[plane_id];

				raw->plane[plane_id] =
					vlv_invert_wm_value(active->wm[level].plane[plane_id],
							    fifo_state->plane[plane_id]);
			}
		}

		for_each_plane_id_on_crtc(crtc, plane_id)
			vlv_raw_plane_wm_set(crtc_state, level,
					     plane_id, USHRT_MAX);
		vlv_invalidate_wms(crtc, active, level);

		crtc_state->wm.vlv.optimal = *active;
		crtc_state->wm.vlv.intermediate = *active;

		DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
			      pipe_name(pipe),
			      wm->pipe[pipe].plane[PLANE_PRIMARY],
			      wm->pipe[pipe].plane[PLANE_CURSOR],
			      wm->pipe[pipe].plane[PLANE_SPRITE0],
			      wm->pipe[pipe].plane[PLANE_SPRITE1]);
	}

	DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
		      wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}

void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	mutex_lock(&dev_priv->wm.wm_mutex);

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_crtc *crtc =
			intel_get_crtc_for_pipe(dev_priv, plane->pipe);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
		const struct vlv_fifo_state *fifo_state =
			&crtc_state->wm.vlv.fifo_state;
		enum plane_id plane_id = plane->id;
		int level;

		if (plane_state->base.visible)
			continue;

		for (level = 0; level < wm_state->num_levels; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.vlv.raw[level];

			raw->plane[plane_id] = 0;

			wm_state->wm[level].plane[plane_id] =
				vlv_invert_wm_value(raw->plane[plane_id],
						    fifo_state->plane[plane_id]);
		}
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		crtc_state->wm.vlv.intermediate =
			crtc_state->wm.vlv.optimal;
		crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
	}

	vlv_program_watermarks(dev_priv);

	mutex_unlock(&dev_priv->wm.wm_mutex);
}

/*
 * FIXME should probably kill this and improve
 * the real watermark readout/sanitation instead
 */
static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
{
	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */
}

void ilk_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct drm_crtc *crtc;

	ilk_init_lp_watermarks(dev_priv);

	for_each_crtc(dev, crtc)
		ilk_pipe_wm_get_hw_state(crtc);

	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);

	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
	if (INTEL_GEN(dev_priv) >= 7) {
		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
	else if (IS_IVYBRIDGE(dev_priv))
		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

	hw->enable_fbc_wm =
		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}

/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 * @crtc: the #intel_crtc on which to compute the WM
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(crtc);
}

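/*
 * IPC (Isochronous Priority Control) is a single enable bit in
 * DISP_ARB_CTL2; mirror the cached dev_priv->ipc_enabled flag into the
 * register, with a Skylake-specific workaround keeping it disabled.
 */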
void intel_enable_ipc(struct drm_i915_private *dev_priv)
{
	u32 val;

	/* Display WA #0477 WaDisableIPC: skl */
	if (IS_SKYLAKE(dev_priv)) {
		dev_priv->ipc_enabled = false;
		return;
	}

	val = I915_READ(DISP_ARB_CTL2);

	if (dev_priv->ipc_enabled)
		val |= DISP_IPC_ENABLE;
	else
		val &= ~DISP_IPC_ENABLE;

	I915_WRITE(DISP_ARB_CTL2, val);
}

void intel_init_ipc(struct drm_i915_private *dev_priv)
{
	dev_priv->ipc_enabled = false;
	if (!HAS_IPC(dev_priv))
		return;

	dev_priv->ipc_enabled = true;
	intel_enable_ipc(dev_priv);
}

/*
 * Lock protecting IPS related data structures
 */
DEFINE_SPINLOCK(mchdev_lock);

/* Global for IPS driver to get at the current i915 device. Protected by
 * mchdev_lock. */
static struct drm_i915_private *i915_mch_dev;

bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
{
	u16 rgvswctl;

	lockdep_assert_held(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}

static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
{
	u32 rgvmodectl;
	u8 fmax, fmin, fstart, vstart;

	spin_lock_irq(&mchdev_lock);

	rgvmodectl = I915_READ(MEMMODECTL);

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
	dev_priv->ips.fstart = fstart;

	dev_priv->ips.max_delay = fstart;
	dev_priv->ips.min_delay = fmin;
	dev_priv->ips.cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	mdelay(1);

	ironlake_set_drps(dev_priv, fstart);

	dev_priv->ips.last_count1 = I915_READ(DMIEC) +
		I915_READ(DDREC) + I915_READ(CSIEC);
	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->ips.last_count2 = I915_READ(GFXEC);
	dev_priv->ips.last_time2 = ktime_get_raw_ns();

	spin_unlock_irq(&mchdev_lock);
}

static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
{
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
	mdelay(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	mdelay(1);

	spin_unlock_irq(&mchdev_lock);
}

/* There's a funny hw issue where the hw returns all 0 when reading from
 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
 * ourselves, instead of doing a rmw cycle (which might result in us clearing
 * all limits and the gpu stuck at whatever frequency it is at atm).
 */
static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 limits;

	/* Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt. */
	if (INTEL_GEN(dev_priv) >= 9) {
		limits = (rps->max_freq_softlimit) << 23;
		if (val <= rps->min_freq_softlimit)
			limits |= (rps->min_freq_softlimit) << 14;
	} else {
		limits = rps->max_freq_softlimit << 24;
		if (val <= rps->min_freq_softlimit)
			limits |= rps->min_freq_softlimit << 16;
	}

	return limits;
}

static void rps_set_power(struct drm_i915_private *dev_priv, int new_power)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 threshold_up = 0, threshold_down = 0; /* in % */
	u32 ei_up = 0, ei_down = 0;

	lockdep_assert_held(&rps->power.mutex);

	if (new_power == rps->power.mode)
		return;

	/* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		/* Upclock if more than 95% busy over 16ms */
		ei_up = 16000;
		threshold_up = 95;

		/* Downclock if less than 85% busy over 32ms */
		ei_down = 32000;
		threshold_down = 85;
		break;

	case BETWEEN:
		/* Upclock if more than 90% busy over 13ms */
		ei_up = 13000;
		threshold_up = 90;

		/* Downclock if less than 75% busy over 32ms */
		ei_down = 32000;
		threshold_down = 75;
		break;

	case HIGH_POWER:
		/* Upclock if more than 85% busy over 10ms */
		ei_up = 10000;
		threshold_up = 85;

		/* Downclock if less than 60% busy over 32ms */
		ei_down = 32000;
		threshold_down = 60;
		break;
	}

	/* When byt can survive without system hang with dynamic
	 * sw freq adjustments, this restriction can be lifted.
	 */
	if (IS_VALLEYVIEW(dev_priv))
		goto skip_hw_write;

	I915_WRITE(GEN6_RP_UP_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_up));
	I915_WRITE(GEN6_RP_UP_THRESHOLD,
		   GT_INTERVAL_FROM_US(dev_priv,
				       ei_up * threshold_up / 100));

	I915_WRITE(GEN6_RP_DOWN_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_down));
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
		   GT_INTERVAL_FROM_US(dev_priv,
				       ei_down * threshold_down / 100));

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

skip_hw_write:
	rps->power.mode = new_power;
	rps->power.up_threshold = threshold_up;
	rps->power.down_threshold = threshold_down;
}

static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int new_power;

	new_power = rps->power.mode;
	switch (rps->power.mode) {
	case LOW_POWER:
		if (val > rps->efficient_freq + 1 &&
		    val > rps->cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= rps->efficient_freq &&
		    val < rps->cur_freq)
			new_power = LOW_POWER;
		else if (val >= rps->rp0_freq &&
			 val > rps->cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
		    val < rps->cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val <= rps->min_freq_softlimit)
		new_power = LOW_POWER;
	if (val >= rps->max_freq_softlimit)
		new_power = HIGH_POWER;

	mutex_lock(&rps->power.mutex);
	if (rps->power.interactive)
		new_power = HIGH_POWER;
	rps_set_power(dev_priv, new_power);
	mutex_unlock(&rps->power.mutex);
}

void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive)
{
	struct intel_rps *rps = &i915->gt_pm.rps;

	if (INTEL_GEN(i915) < 6)
		return;

	mutex_lock(&rps->power.mutex);
	if (interactive) {
		if (!rps->power.interactive++ && READ_ONCE(i915->gt.awake))
			rps_set_power(i915, HIGH_POWER);
	} else {
		GEM_BUG_ON(!rps->power.interactive);
		rps->power.interactive--;
	}
	mutex_unlock(&rps->power.mutex);
}

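/*
 * Build the PM interrupt mask for a given frequency: keep the "up"
 * events only while we can still upclock, the "down" events only while
 * we can still downclock, then filter by the platform's RPS events.
 */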
static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 mask = 0;

	/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
	if (val > rps->min_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
	if (val < rps->max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

	mask &= dev_priv->pm_rps_events;

	return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
}

/* gen6_set_rps is called to update the frequency request, but should also be
 * called when the range (min_delay and max_delay) is modified so that we can
 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	/* min/max delay may still have been modified so be sure to
	 * write the limits value.
	 */
	if (val != rps->cur_freq) {
		gen6_set_rps_thresholds(dev_priv, val);

		if (INTEL_GEN(dev_priv) >= 9)
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN9_FREQUENCY(val));
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			I915_WRITE(GEN6_RPNSWREQ,
				   HSW_FREQUENCY(val));
		else
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN6_FREQUENCY(val) |
				   GEN6_OFFSET(0) |
				   GEN6_AGGRESSIVE_TURBO);
	}

	/* Make sure we continue to get interrupts
	 * until we hit the minimum or maximum frequencies.
	 */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	rps->cur_freq = val;
	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));

	return 0;
}

static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
	int err;

	if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
		      "Odd GPU freq value\n"))
		val &= ~1;

	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	if (val != dev_priv->gt_pm.rps.cur_freq) {
		err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
		if (err)
			return err;

		gen6_set_rps_thresholds(dev_priv, val);
	}

	dev_priv->gt_pm.rps.cur_freq = val;
	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));

	return 0;
}

/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
 *
 * If Gfx is Idle, then
 * 1. Forcewake Media well.
 * 2. Request idle freq.
 * 3. Release Forcewake of Media well.
 */
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 val = rps->idle_freq;
	int err;

	if (rps->cur_freq <= val)
		return;

	/* The punit delays the write of the frequency and voltage until it
	 * determines the GPU is awake. During normal usage we don't want to
	 * waste power changing the frequency if the GPU is sleeping (rc6).
	 * However, the GPU and driver are now idle and we do not want to delay
	 * switching to minimum voltage (reducing power whilst idle) as we do
	 * not expect to be woken in the near future and so must flush the
	 * change by waking the device.
	 *
	 * We choose to take the media powerwell (either would do to trick the
	 * punit into committing the voltage change) as that takes a lot less
	 * power than the render powerwell.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
	err = valleyview_set_rps(dev_priv, val);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);

	if (err)
		DRM_ERROR("Failed to set RPS for idle\n");
}

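/*
 * Called when the GPU becomes busy: re-arm the RPS interrupts and jump
 * to a sensible starting frequency (at least the efficient one, RPe).
 */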
void gen6_rps_busy(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	mutex_lock(&dev_priv->pcu_lock);
	if (rps->enabled) {
		u8 freq;

		if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
			gen6_rps_reset_ei(dev_priv);
		I915_WRITE(GEN6_PMINTRMSK,
			   gen6_rps_pm_mask(dev_priv, rps->cur_freq));

		gen6_enable_rps_interrupts(dev_priv);

		/* Use the user's desired frequency as a guide, but for better
		 * performance, jump directly to RPe as our starting frequency.
		 */
		freq = max(rps->cur_freq,
			   rps->efficient_freq);

		if (intel_set_rps(dev_priv,
				  clamp(freq,
					rps->min_freq_softlimit,
					rps->max_freq_softlimit)))
			DRM_DEBUG_DRIVER("Failed to set busy frequency\n");
	}
	mutex_unlock(&dev_priv->pcu_lock);
}

void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	/* Flush our bottom-half so that it does not race with us
	 * setting the idle frequency and so that it is bounded by
	 * our rpm wakeref. And then disable the interrupts to stop any
	 * further RPS reclocking whilst we are asleep.
	 */
	gen6_disable_rps_interrupts(dev_priv);

	mutex_lock(&dev_priv->pcu_lock);
	if (rps->enabled) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			vlv_set_rps_idle(dev_priv);
		else
			gen6_set_rps(dev_priv, rps->idle_freq);
		rps->last_adj = 0;
		I915_WRITE(GEN6_PMINTRMSK,
			   gen6_sanitize_rps_pm_mask(dev_priv, ~0));
	}
	mutex_unlock(&dev_priv->pcu_lock);
}

void gen6_rps_boost(struct i915_request *rq,
		    struct intel_rps_client *rps_client)
{
	struct intel_rps *rps = &rq->i915->gt_pm.rps;
	unsigned long flags;
	bool boost;

	/* This is intentionally racy! We peek at the state here, then
	 * validate inside the RPS worker.
	 */
	if (!rps->enabled)
		return;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
		return;

	/* Serializes with i915_request_retire() */
	boost = false;
	spin_lock_irqsave(&rq->lock, flags);
	if (!rq->waitboost && !dma_fence_is_signaled_locked(&rq->fence)) {
		boost = !atomic_fetch_inc(&rps->num_waiters);
		rq->waitboost = true;
	}
	spin_unlock_irqrestore(&rq->lock, flags);
	if (!boost)
		return;

	if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
		schedule_work(&rps->work);

	atomic_inc(rps_client ? &rps_client->boosts : &rps->boosts);
}

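/*
 * Top-level frequency request: the caller must hold pcu_lock and pass a
 * value within [min_freq, max_freq]; dispatches to the VLV/CHV punit
 * path or the GEN6+ RPNSWREQ path.
 */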
int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int err;

	lockdep_assert_held(&dev_priv->pcu_lock);
	GEM_BUG_ON(val > rps->max_freq);
	GEM_BUG_ON(val < rps->min_freq);

	if (!rps->enabled) {
		rps->cur_freq = val;
		return 0;
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		err = valleyview_set_rps(dev_priv, val);
	else
		err = gen6_set_rps(dev_priv, val);

	return err;
}

6648
static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
Z
Zhe Wang 已提交
6649 6650
{
	I915_WRITE(GEN6_RC_CONTROL, 0);
6651
	I915_WRITE(GEN9_PG_ENABLE, 0);
Z
Zhe Wang 已提交
6652 6653
}

6654
static void gen9_disable_rps(struct drm_i915_private *dev_priv)
6655 6656 6657 6658
{
	I915_WRITE(GEN6_RP_CONTROL, 0);
}

6659
static void gen6_disable_rc6(struct drm_i915_private *dev_priv)
6660 6661
{
	I915_WRITE(GEN6_RC_CONTROL, 0);
6662 6663 6664 6665
}

static void gen6_disable_rps(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
	I915_WRITE(GEN6_RP_CONTROL, 0);
}

static void cherryview_disable_rc6(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RC_CONTROL, 0);
}

static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RP_CONTROL, 0);
}

static void valleyview_disable_rc6(struct drm_i915_private *dev_priv)
{
	/* We're doing forcewake before disabling RC6,
	 * which is what the BIOS expects when going into suspend */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	I915_WRITE(GEN6_RC_CONTROL, 0);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RP_CONTROL, 0);
}

static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
{
	bool enable_rc6 = true;
	unsigned long rc6_ctx_base;
	u32 rc_ctl;
	int rc_sw_target;

	rc_ctl = I915_READ(GEN6_RC_CONTROL);
	rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
		       RC_SW_TARGET_STATE_SHIFT;
	DRM_DEBUG_DRIVER("BIOS enabled RC states: "
			 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
			 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
			 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
			 rc_sw_target);

	if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
		DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
		enable_rc6 = false;
	}

	/*
	 * The exact context size is not known for BXT, so assume a page size
	 * for this check.
	 */
	rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
	if (!((rc6_ctx_base >= dev_priv->dsm_reserved.start) &&
	      (rc6_ctx_base + PAGE_SIZE < dev_priv->dsm_reserved.end))) {
		DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
		enable_rc6 = false;
	}

	if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
		DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
		enable_rc6 = false;
	}

	if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
	    !I915_READ(GEN8_PUSHBUS_ENABLE) ||
	    !I915_READ(GEN8_PUSHBUS_SHIFT)) {
		DRM_DEBUG_DRIVER("Pushbus not set up properly.\n");
		enable_rc6 = false;
	}

	if (!I915_READ(GEN6_GFXPAUSE)) {
		DRM_DEBUG_DRIVER("GFX pause not set up properly.\n");
		enable_rc6 = false;
	}

	if (!I915_READ(GEN8_MISC_CTRL0)) {
		DRM_DEBUG_DRIVER("GPM control not set up properly.\n");
		enable_rc6 = false;
	}

	return enable_rc6;
}

static bool sanitize_rc6(struct drm_i915_private *i915)
{
	struct intel_device_info *info = mkwrite_device_info(i915);

	/* Powersaving is controlled by the host when inside a VM */
	if (intel_vgpu_active(i915))
		info->has_rc6 = 0;

	if (info->has_rc6 &&
	    IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(i915)) {
		DRM_INFO("RC6 disabled by BIOS\n");
		info->has_rc6 = 0;
	}

	/*
	 * We assume that we do not have any deep rc6 levels if we don't have
	 * the previous rc6 level supported, i.e. we use HAS_RC6()
	 * as the initial coarse check for rc6 in general, moving on to
	 * progressively finer/deeper levels.
	 */
	if (!info->has_rc6 && info->has_rc6p)
		info->has_rc6p = 0;

	return info->has_rc6;
}

static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	/* All of these values are in units of 50MHz */

	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
	if (IS_GEN9_LP(dev_priv)) {
		u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
		rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
		rps->rp1_freq = (rp_state_cap >>  8) & 0xff;
		rps->min_freq = (rp_state_cap >>  0) & 0xff;
	} else {
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		rps->rp0_freq = (rp_state_cap >>  0) & 0xff;
		rps->rp1_freq = (rp_state_cap >>  8) & 0xff;
		rps->min_freq = (rp_state_cap >> 16) & 0xff;
	}
	/* hw_max = RP0 until we check for overclocking */
	rps->max_freq = rps->rp0_freq;

	rps->efficient_freq = rps->rp1_freq;
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
	    IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		u32 ddcc_status = 0;

		if (sandybridge_pcode_read(dev_priv,
					   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
					   &ddcc_status) == 0)
			rps->efficient_freq =
				clamp_t(u8,
					((ddcc_status >> 8) & 0xff),
					rps->min_freq,
					rps->max_freq);
	}

	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Store the frequency values in 16.66 MHz units, which is
		 * the natural hardware unit for SKL
		 */
		rps->rp0_freq *= GEN9_FREQ_SCALER;
		rps->rp1_freq *= GEN9_FREQ_SCALER;
		rps->min_freq *= GEN9_FREQ_SCALER;
		rps->max_freq *= GEN9_FREQ_SCALER;
		rps->efficient_freq *= GEN9_FREQ_SCALER;
	}
}
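
/*
 * Decode example for the RP_STATE_CAP unpacking above, using a made-up
 * register value: on non-LP parts, 0x070b16 would yield rp0_freq = 0x16
 * (22 * 50 = 1100 MHz), rp1_freq = 0x0b (550 MHz) and min_freq = 0x07
 * (350 MHz), satisfying RP0 > RP1 > RPn.
 */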

static void reset_rps(struct drm_i915_private *dev_priv,
		      int (*set)(struct drm_i915_private *, u8))
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u8 freq = rps->cur_freq;

	/* force a reset */
	rps->power.mode = -1;
	rps->cur_freq = -1;

	if (set(dev_priv, freq))
		DRM_ERROR("Failed to reset RPS to initial values\n");
}
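
/*
 * Poisoning power.mode and cur_freq with -1 above guarantees that the
 * set() callback sees a state change and fully reprograms the hardware,
 * rather than being short-circuited by stale cached values.
 */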

/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static void gen9_enable_rps(struct drm_i915_private *dev_priv)
{
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Program defaults and thresholds for RPS */
	if (IS_GEN9(dev_priv))
		I915_WRITE(GEN6_RC_VIDEO_FREQ,
			GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq));

	/* 1 second timeout */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
		GT_INTERVAL_FROM_US(dev_priv, 1000000));

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);

	/* Leaning on the below call to gen6_set_rps to program/setup the
	 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
	 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
	reset_rps(dev_priv, gen6_set_rps);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 rc6_mode;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2b: Program RC6 thresholds. */
	if (INTEL_GEN(dev_priv) >= 10) {
		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
		I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
	} else if (IS_SKYLAKE(dev_priv)) {
		/*
		 * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only
		 * when CPG is enabled
		 */
		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
	} else {
		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
	}

	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

	if (HAS_GUC(dev_priv))
		I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);

	I915_WRITE(GEN6_RC_SLEEP, 0);

	/*
	 * 2c: Program Coarse Power Gating Policies.
	 *
	 * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
	 * use instead is a more conservative estimate for the maximum time
	 * it takes us to service a CS interrupt and submit a new ELSP - that
	 * is the time which the GPU is idle waiting for the CPU to select the
	 * next request to execute. If the idle hysteresis is less than that
	 * interrupt service latency, the hardware will automatically gate
	 * the power well and we will then incur the wake up cost on top of
	 * the service latency. A similar guide from intel_pstate is that we
	 * do not want the enable hysteresis to be less than the wakeup latency.
	 *
	 * igt/gem_exec_nop/sequential provides a rough estimate for the
	 * service latency, and puts it around 10us for Broadwell (and other
	 * big core) and around 40us for Broxton (and other low power cores).
	 * [Note that for legacy ringbuffer submission, this is less than 1us!]
	 * However, the wakeup latency on Broxton is closer to 100us. To be
	 * conservative, we have to factor in a context switch on top (due
	 * to ksoftirqd).
	 */
	I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
	I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);

	/* 3a: Enable RC6 */
	I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */

	/* WaRsUseTimeoutMode:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_C0))
		rc6_mode = GEN7_RC_CTL_TO_MODE;
	else
		rc6_mode = GEN6_RC_CTL_EI_MODE(1);

	I915_WRITE(GEN6_RC_CONTROL,
		   GEN6_RC_CTL_HW_ENABLE |
		   GEN6_RC_CTL_RC6_ENABLE |
		   rc6_mode);

	/*
	 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
	 * WaRsDisableCoarsePowerGating:skl,cnl - Render/Media PG need to be disabled with RC6.
	 */
	if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		I915_WRITE(GEN9_PG_ENABLE, 0);
	else
		I915_WRITE(GEN9_PG_ENABLE,
			   GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
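
/*
 * Assuming the same 1280 ns units as the Bspec guidance quoted above, the
 * idle hysteresis of 250 written to the PG_IDLE_HYSTERESIS registers
 * corresponds to 250 * 1280 ns = 320 us, comfortably above the ~100 us
 * Broxton wakeup latency plus context-switch overhead discussed there.
 */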

static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2b: Program RC6 thresholds. */
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */

	/* 3: Enable RC6 */
	I915_WRITE(GEN6_RC_CONTROL,
		   GEN6_RC_CTL_HW_ENABLE |
		   GEN7_RC_CTL_TO_MODE |
		   GEN6_RC_CTL_RC6_ENABLE);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void gen8_enable_rps(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* 1: Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RPNSWREQ,
		   HSW_FREQUENCY(rps->rp1_freq));
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   HSW_FREQUENCY(rps->rp1_freq));
	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */

	/* Docs recommend 900MHz, and 300 MHz respectively */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   rps->max_freq_softlimit << 24 |
		   rps->min_freq_softlimit << 16);

	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70% */
	I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
	I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 2: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	reset_rps(dev_priv, gen6_set_rps);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 rc6vids, rc6_mask;
	u32 gtfifodbg;
	int ret;

	I915_WRITE(GEN6_RC_STATE, 0);

	/* Clear the DBG now so we don't confuse earlier errors */
	gtfifodbg = I915_READ(GTFIFODBG);
	if (gtfifodbg) {
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* We don't use those on Haswell */
	rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	if (HAS_RC6p(dev_priv))
		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
	if (HAS_RC6pp(dev_priv))
		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	rc6vids = 0;
	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	if (IS_GEN6(dev_priv) && ret) {
		DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
	} else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
		DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
			  GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
		rc6vids &= 0xffff00;
		rc6vids |= GEN6_ENCODE_RC6_VID(450);
		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
		if (ret)
			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Power down if completely idle for over 50ms */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	reset_rps(dev_priv, gen6_set_rps);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	const int min_freq = 15;
	const int scaling_factor = 180;
	unsigned int gpu_freq;
	unsigned int max_ia_freq, min_ring_freq;
	unsigned int max_gpu_freq, min_gpu_freq;
	struct cpufreq_policy *policy;

	WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));

	if (rps->max_freq <= rps->min_freq)
		return;

	policy = cpufreq_cpu_get(0);
	if (policy) {
		max_ia_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	} else {
		/*
		 * Default to measured freq if none found, PCU will ensure we
		 * don't go over
		 */
		max_ia_freq = tsc_khz;
	}

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	min_ring_freq = I915_READ(DCLK) & 0xf;
	/* convert DDR frequency from units of 266.6MHz to bandwidth */
	min_ring_freq = mult_frac(min_ring_freq, 8, 3);

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 MHz units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access.  We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
		const int diff = max_gpu_freq - gpu_freq;
		unsigned int ia_freq = 0, ring_freq = 0;

		if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
			/*
			 * ring_freq = 2 * GT. ring_freq is in 100MHz units
			 * No floor required for ring frequency on SKL.
			 */
			ring_freq = gpu_freq;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			/* max(2 * GT, DDR). NB: GT is 50MHz units */
			ring_freq = max(min_ring_freq, gpu_freq);
		} else if (IS_HASWELL(dev_priv)) {
			ring_freq = mult_frac(gpu_freq, 5, 4);
			ring_freq = max(min_ring_freq, ring_freq);
			/* leave ia_freq as the default, chosen by cpufreq */
		} else {
			/* On older processors, there is no separate ring
			 * clock domain, so in order to boost the bandwidth
			 * of the ring, we need to upclock the CPU (ia_freq).
			 *
			 * For GPU frequencies less than 750MHz,
			 * just use the lowest ring freq.
			 */
			if (gpu_freq < min_freq)
				ia_freq = 800;
			else
				ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
			ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
		}

		sandybridge_pcode_write(dev_priv,
					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
					ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
					gpu_freq);
	}
}
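
/*
 * Worked example for the legacy (pre-Haswell) branch above, with made-up
 * numbers: given max_ia_freq = 3000 MHz, scaling_factor = 180 and a GPU
 * frequency 4 steps below max (diff = 4), ia_freq = 3000 - (4 * 180) / 2 =
 * 2640 MHz, which DIV_ROUND_CLOSEST() encodes as 26 for the PCU mailbox.
 */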

static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);

	switch (INTEL_INFO(dev_priv)->sseu.eu_total) {
	case 8:
		/* (2 * 4) config */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
		break;
	case 12:
		/* (2 * 6) config */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
		break;
	case 16:
		/* (2 * 8) config */
	default:
		/* Setting (2 * 8) Min RP0 for any other combination */
		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
		break;
	}

	rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);

	return rp0;
}

static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
	rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;

	return rpe;
}

static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
	rp1 = (val & FB_GFX_FREQ_FUSE_MASK);

	return rp1;
}

static u32 cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpn;

	val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
	rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
		       FB_GFX_FREQ_FUSE_MASK);

	return rpn;
}

static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp1;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

	return rp1;
}

static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
	/* Clamp to max */
	rp0 = min_t(u32, rp0, 0xea);

	return rp0;
}

static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

	return rpe;
}

static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
	/*
	 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
	 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
	 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
	 * a frequency Punit will not allow values below 0xc0. Clamp it to 0xc0
	 * to make sure it matches what Punit accepts.
	 */
	return max_t(u32, val, 0xc0);
}

/* Check that the pctx buffer wasn't moved under us. */
static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON(pctx_addr != dev_priv->dsm.start +
			     dev_priv->vlv_pctx->stolen->start);
}

/* Check that the pcbr address is not empty. */
static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
}

static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
{
	resource_size_t pctx_paddr, paddr;
	resource_size_t pctx_size = 32*1024;
	u32 pcbr;

	pcbr = I915_READ(VLV_PCBR);
	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
		DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
		paddr = dev_priv->dsm.end + 1 - pctx_size;
		GEM_BUG_ON(paddr > U32_MAX);

		pctx_paddr = (paddr & (~4095));
		I915_WRITE(VLV_PCBR, pctx_paddr);
	}

	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
}
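
/*
 * Note for both setup paths: the power context has to live in the stolen
 * DRAM carve-out, and VLV_PCBR only holds a 32-bit, 4 KiB-aligned address,
 * hence the U32_MAX checks and the masking with ~4095.
 */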

static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *pctx;
	resource_size_t pctx_paddr;
	resource_size_t pctx_size = 24*1024;
	u32 pcbr;

	pcbr = I915_READ(VLV_PCBR);
	if (pcbr) {
		/* BIOS set it up already, grab the pre-alloc'd space */
		resource_size_t pcbr_offset;

		pcbr_offset = (pcbr & (~4095)) - dev_priv->dsm.start;
		pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv,
								      pcbr_offset,
								      I915_GTT_OFFSET_NONE,
								      pctx_size);
		goto out;
	}

	DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");

	/*
	 * From the Gunit register HAS:
	 * The Gfx driver is expected to program this register and ensure
	 * proper allocation within Gfx stolen memory.  For example, this
	 * register should be programmed such that the PCBR range does not
	 * overlap with other ranges, such as the frame buffer, protected
	 * memory, or any other relevant ranges.
	 */
	pctx = i915_gem_object_create_stolen(dev_priv, pctx_size);
	if (!pctx) {
		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
		goto out;
	}

	GEM_BUG_ON(range_overflows_t(u64,
				     dev_priv->dsm.start,
				     pctx->stolen->start,
				     U32_MAX));
	pctx_paddr = dev_priv->dsm.start + pctx->stolen->start;
	I915_WRITE(VLV_PCBR, pctx_paddr);

out:
	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
	dev_priv->vlv_pctx = pctx;
}

static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *pctx;

	pctx = fetch_and_zero(&dev_priv->vlv_pctx);
	if (pctx)
		i915_gem_object_put(pctx);
}

static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
{
	dev_priv->gt_pm.rps.gpll_ref_freq =
		vlv_get_cck_clock(dev_priv, "GPLL ref",
				  CCK_GPLL_CLOCK_CONTROL,
				  dev_priv->czclk_freq);

	DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
			 dev_priv->gt_pm.rps.gpll_ref_freq);
}

static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 val;

	valleyview_setup_pctx(dev_priv);

	vlv_init_gpll_ref_freq(dev_priv);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		dev_priv->mem_freq = 800;
		break;
	case 2:
		dev_priv->mem_freq = 1066;
		break;
	case 3:
		dev_priv->mem_freq = 1333;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);

	rps->max_freq = valleyview_rps_max_freq(dev_priv);
	rps->rp0_freq = rps->max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, rps->max_freq),
			 rps->max_freq);

	rps->efficient_freq = valleyview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, rps->efficient_freq),
			 rps->efficient_freq);

	rps->rp1_freq = valleyview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, rps->rp1_freq),
			 rps->rp1_freq);

	rps->min_freq = valleyview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, rps->min_freq),
			 rps->min_freq);
}

static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 val;

	cherryview_setup_pctx(dev_priv);

	vlv_init_gpll_ref_freq(dev_priv);

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
	mutex_unlock(&dev_priv->sb_lock);

	switch ((val >> 2) & 0x7) {
	case 3:
		dev_priv->mem_freq = 2000;
		break;
	default:
		dev_priv->mem_freq = 1600;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);

	rps->max_freq = cherryview_rps_max_freq(dev_priv);
	rps->rp0_freq = rps->max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, rps->max_freq),
			 rps->max_freq);

	rps->efficient_freq = cherryview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, rps->efficient_freq),
			 rps->efficient_freq);

	rps->rp1_freq = cherryview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, rps->rp1_freq),
			 rps->rp1_freq);

	rps->min_freq = cherryview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, rps->min_freq),
			 rps->min_freq);

	WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq |
		   rps->min_freq) & 1,
		  "Odd GPU freq values\n");
}

static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
	valleyview_cleanup_pctx(dev_priv);
}

static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 gtfifodbg, rc6_mode, pcbr;

	gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
					     GT_FIFO_FREE_ENTRIES_CHV);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	cherryview_check_pctx(dev_priv);

	/* 1a & 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2a: Program RC6 thresholds. */
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */

	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);

	/* TO threshold set to 500 us (0x186 * 1.28 us) */
	I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);

	/* Allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	/* For now we assume BIOS is allocating and populating the PCBR */
	pcbr = I915_READ(VLV_PCBR);

	/* 3: Enable RC6 */
	rc6_mode = 0;
	if (pcbr >> VLV_PCBR_ADDR_SHIFT)
		rc6_mode = GEN7_RC_CTL_TO_MODE;
	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
{
	u32 val;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* 1: Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 2: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN |
		  VLV_SOC_TDP_EN |
		  CHV_BIAS_CPU_50_SOC_50;
	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	/* RPS code assumes GPLL is used */
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	reset_rps(dev_priv, valleyview_set_rps);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 gtfifodbg;

	valleyview_check_pctx(dev_priv);

	gtfifodbg = I915_READ(GTFIFODBG);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_engine(engine, dev_priv, id)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);

	/* Allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
				      VLV_MEDIA_RC0_COUNT_EN |
				      VLV_RENDER_RC0_COUNT_EN |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	I915_WRITE(GEN6_RC_CONTROL,
		   GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
{
	u32 val;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN |
		  VLV_SOC_TDP_EN |
		  VLV_BIAS_CPU_125_SOC_875;
	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	/* RPS code assumes GPLL is used */
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	reset_rps(dev_priv, valleyview_set_rps);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1<<post) * pre));

	return freq;
}
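
/*
 * Worked example for intel_pxfreq(), with a hypothetical fuse value:
 * vidfreq = 0x00100012 gives div = 0x10 (16), post = 0 and pre = 2, so
 * freq = (16 * 133333) / ((1 << 0) * 2) = 1066664.
 */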

static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};

static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	lockdep_assert_held(&mchdev_lock);

	diff1 = now - dev_priv->ips.last_time1;

	/* Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	if (diff1 <= 10)
		return dev_priv->ips.chipset_power;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->ips.last_count1) {
		diff = ~0UL - dev_priv->ips.last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->ips.last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->ips.c_m &&
		    cparams[i].t == dev_priv->ips.r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->ips.last_count1 = total_count;
	dev_priv->ips.last_time1 = now;

	dev_priv->ips.chipset_power = ret;

	return ret;
}

unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	unsigned long val;

	if (!IS_GEN5(dev_priv))
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_chipset_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}

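/*
 * Derive a thermal reading from the TSFS slope/intercept fields and the
 * TR1 sensor; __i915_gfx_val() below treats the result roughly as a
 * temperature when picking its correction factors.
 */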
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}

static int _pxvid_to_vd(u8 pxvid)
{
	if (pxvid == 0)
		return 0;

	if (pxvid >= 8 && pxvid < 31)
		pxvid = 31;

	return (pxvid + 2) * 125;
}

static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	const int vd = _pxvid_to_vd(pxvid);
	const int vm = vd - 1125;

	if (INTEL_INFO(dev_priv)->is_mobile)
		return vm > 0 ? vm : 0;

	return vd;
}
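
/*
 * Worked example for the two helpers above: a raw pxvid of 18 falls in the
 * [8, 31) hole and is first bumped to 31, so vd = (31 + 2) * 125 = 4125;
 * on mobile parts the value returned is then vd - 1125 = 3000.
 */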

static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	u64 now, diff, diffms;
	u32 count;

	lockdep_assert_held(&mchdev_lock);

	now = ktime_get_raw_ns();
	diffms = now - dev_priv->ips.last_time2;
	do_div(diffms, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->ips.last_count2) {
		diff = ~0UL - dev_priv->ips.last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->ips.last_count2;
	}

	dev_priv->ips.last_count2 = count;
	dev_priv->ips.last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->ips.gfx_power = diff;
}

void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	if (!IS_GEN5(dev_priv))
		return;

	spin_lock_irq(&mchdev_lock);

	__i915_update_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);
}

static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	lockdep_assert_held(&mchdev_lock);

	pxvid = I915_READ(PXVFREQ(dev_priv->gt_pm.rps.cur_freq));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->ips.corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	__i915_update_gfx_val(dev_priv);

	return dev_priv->ips.gfx_power + state2;
}

unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long val;

	if (!IS_GEN5(dev_priv))
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}

7935 7936 7937 7938 7939 7940 7941 7942 7943 7944 7945
/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = __i915_chipset_val(dev_priv);
	graphics_val = __i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);

/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
		dev_priv->ips.max_delay--;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
		dev_priv->ips.max_delay++;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);

/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	bool ret = false;

	spin_lock_irq(&mchdev_lock);
	if (i915_mch_dev)
		ret = i915_mch_dev->gt.awake;
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->ips.max_delay = dev_priv->ips.fstart;

	if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
		ret = false;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);

/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}

void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
	/* We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = dev_priv;
	spin_unlock_irq(&mchdev_lock);

	ips_ping_for_i915_load();
}

void intel_gpu_ips_teardown(void)
{
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock_irq(&mchdev_lock);
}

static void intel_init_emon(struct drm_i915_private *dev_priv)
{
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW(i), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW(i), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ(i));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW(i), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL(i), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}
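
/*
 * Illustrative numbers for the pxw[] weight computation above (the real
 * inputs are hardware fuses, so these are made up): with vid = 45 and
 * freq = 400000, val = 45 * 45 * 400 * 255 / (127 * 127 * 900) ~= 14,
 * comfortably within the u8 range.
 */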

void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	/*
	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!sanitize_rc6(dev_priv)) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		intel_runtime_pm_get(dev_priv);
	}

	mutex_lock(&dev_priv->pcu_lock);

	/* Initialize RPS limits (for userspace) */
	if (IS_CHERRYVIEW(dev_priv))
		cherryview_init_gt_powersave(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_init_gt_powersave(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_init_rps_frequencies(dev_priv);

	/* Derive initial user preferences/limits from the hardware limits */
	rps->idle_freq = rps->min_freq;
	rps->cur_freq = rps->idle_freq;

	rps->max_freq_softlimit = rps->max_freq;
	rps->min_freq_softlimit = rps->min_freq;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		rps->min_freq_softlimit =
			max_t(int,
			      rps->efficient_freq,
			      intel_freq_opcode(dev_priv, 450));

	/* After setting max-softlimit, find the overclock max freq */
	if (IS_GEN6(dev_priv) ||
	    IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
		u32 params = 0;

		sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
		if (params & BIT(31)) { /* OC supported */
			DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
					 (rps->max_freq & 0xff) * 50,
					 (params & 0xff) * 50);
			rps->max_freq = params & 0xff;
		}
	}

	/* Finally allow us to boost to max by default */
	rps->boost_freq = rps->max_freq;

	mutex_unlock(&dev_priv->pcu_lock);
}

void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (IS_VALLEYVIEW(dev_priv))
		valleyview_cleanup_gt_powersave(dev_priv);

	if (!HAS_RC6(dev_priv))
		intel_runtime_pm_put(dev_priv);
}

/**
 * intel_suspend_gt_powersave - suspend PM work and helper threads
 * @dev_priv: i915 device
 *
 * We don't want to disable RC6 or other features here, we just want
 * to make sure any work we've queued has finished and won't bother
 * us while we're suspended.
 */
void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 6)
		return;

	/* gen6_rps_idle() will be called later to disable interrupts */
}

void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
{
	dev_priv->gt_pm.rps.enabled = true; /* force RPS disabling */
	dev_priv->gt_pm.rc6.enabled = true; /* force RC6 disabling */
	intel_disable_gt_powersave(dev_priv);

	if (INTEL_GEN(dev_priv) >= 11)
		gen11_reset_rps_interrupts(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_reset_rps_interrupts(dev_priv);
}

static inline void intel_disable_llc_pstate(struct drm_i915_private *i915)
{
	lockdep_assert_held(&i915->pcu_lock);

	if (!i915->gt_pm.llc_pstate.enabled)
		return;

	/* Currently there is no HW configuration to be done to disable. */

	i915->gt_pm.llc_pstate.enabled = false;
}

static void intel_disable_rc6(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->pcu_lock);

	if (!dev_priv->gt_pm.rc6.enabled)
		return;

	if (INTEL_GEN(dev_priv) >= 9)
		gen9_disable_rc6(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		cherryview_disable_rc6(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_disable_rc6(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_disable_rc6(dev_priv);

	dev_priv->gt_pm.rc6.enabled = false;
}

static void intel_disable_rps(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->pcu_lock);

	if (!dev_priv->gt_pm.rps.enabled)
		return;

	if (INTEL_GEN(dev_priv) >= 9)
		gen9_disable_rps(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		cherryview_disable_rps(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_disable_rps(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_disable_rps(dev_priv);
	else if (IS_IRONLAKE_M(dev_priv))
		ironlake_disable_drps(dev_priv);

	dev_priv->gt_pm.rps.enabled = false;
}

void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->pcu_lock);

	intel_disable_rc6(dev_priv);
	intel_disable_rps(dev_priv);
	if (HAS_LLC(dev_priv))
		intel_disable_llc_pstate(dev_priv);

	mutex_unlock(&dev_priv->pcu_lock);
}

static inline void intel_enable_llc_pstate(struct drm_i915_private *i915)
{
	lockdep_assert_held(&i915->pcu_lock);

	if (i915->gt_pm.llc_pstate.enabled)
		return;

	gen6_update_ring_freq(i915);

	i915->gt_pm.llc_pstate.enabled = true;
}

static void intel_enable_rc6(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->pcu_lock);

	if (dev_priv->gt_pm.rc6.enabled)
		return;

	if (IS_CHERRYVIEW(dev_priv))
		cherryview_enable_rc6(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_enable_rc6(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 9)
		gen9_enable_rc6(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		gen8_enable_rc6(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_enable_rc6(dev_priv);

	dev_priv->gt_pm.rc6.enabled = true;
}

static void intel_enable_rps(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	lockdep_assert_held(&dev_priv->pcu_lock);

	if (rps->enabled)
		return;

	if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_enable_rps(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		valleyview_enable_rps(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		gen9_enable_rps(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		gen8_enable_rps(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		gen6_enable_rps(dev_priv);
	} else if (IS_IRONLAKE_M(dev_priv)) {
		ironlake_enable_drps(dev_priv);
		intel_init_emon(dev_priv);
	}

	WARN_ON(rps->max_freq < rps->min_freq);
	WARN_ON(rps->idle_freq > rps->max_freq);

	WARN_ON(rps->efficient_freq < rps->min_freq);
	WARN_ON(rps->efficient_freq > rps->max_freq);

	rps->enabled = true;
}

void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
{
	/* Powersaving is controlled by the host when inside a VM */
	if (intel_vgpu_active(dev_priv))
		return;

	mutex_lock(&dev_priv->pcu_lock);

	if (HAS_RC6(dev_priv))
		intel_enable_rc6(dev_priv);
	intel_enable_rps(dev_priv);
	if (HAS_LLC(dev_priv))
		intel_enable_llc_pstate(dev_priv);

	mutex_unlock(&dev_priv->pcu_lock);
}

static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);

		I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
		POSTING_READ(DSPSURF(pipe));
	}
}

static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush:ilk */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:ilk */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev_priv);

	ibx_init_clock_gating(dev_priv);
}
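
/*
 * A note on the _MASKED_BIT_*() helpers used throughout the clock gating
 * code: the registers written this way carry a write-enable mask in their
 * upper 16 bits, so (going by the definitions in i915_reg.h)
 * _MASKED_BIT_ENABLE(bit) expands to roughly ((bit) << 16 | (bit)) and
 * _MASKED_BIT_DISABLE(bit) to ((bit) << 16), updating only the named bit
 * and leaving the rest of the register untouched.
 */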

static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	int pipe;
	uint32_t val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}

static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
			      tmp);
}

static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN6_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set.  Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* WaStripsFansDisableFastClipPerformanceFix:snb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/*
	 * Bspec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 * 3DSTATE_SF number of SF output attributes is more than 16."
	 */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev_priv);

	cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}

static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}

static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (HAS_PCH_LPT_LP(dev_priv))
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	I915_WRITE(TRANS_CHICKEN1(PIPE_A),
		   I915_READ(TRANS_CHICKEN1(PIPE_A)) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}

static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT_LP(dev_priv)) {
		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}

static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
				   int general_prio_credits,
				   int high_prio_credits)
{
	u32 misccpctl;
	u32 val;

	/* WaTempDisableDOPClkGating:bdw */
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);

	val = I915_READ(GEN8_L3SQCREG1);
	val &= ~L3_PRIO_CREDITS_MASK;
	val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
	val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
	I915_WRITE(GEN8_L3SQCREG1, val);

	/*
	 * Wait at least 100 clocks before re-enabling clock gating.
	 * See the definition of L3SQCREG1 in BSpec.
	 */
	POSTING_READ(GEN8_L3SQCREG1);
	udelay(1);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}
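
/*
 * The two callers in this file illustrate the intended usage: bdw programs
 * 30 general priority credits and 2 high priority credits
 * (WaProgramL3SqcReg1Default:bdw), while chv programs 38 and 2:
 *
 *	gen8_set_l3sqc_credits(dev_priv, 30, 2);
 */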

static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* This is not a Wa. Enable to reduce Sampler power */
	I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
		   I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
}

static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
{
	if (!HAS_PCH_CNP(dev_priv))
		return;

	/* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) |
		   CNP_PWM_CGE_GATING_DISABLE);
}

static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	cnp_init_clock_gating(dev_priv);

	/* This is not a Wa. Enable for better image quality */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));

	/* WaEnableChickenDCPR:cnl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcWakeMemOn:cnl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_MEMORY_WAKE);

	val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE);
	/* ReadHitWriteOnlyDisable:cnl */
	val |= RCCUNIT_CLKGATE_DIS;
	/* WaSarbUnitClockGatingDisable:cnl (pre-prod) */
	if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0))
		val |= SARBUNIT_CLKGATE_DIS;
	I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);

	/* Wa_2201832410:cnl */
	val = I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE);
	val |= GWUNIT_CLKGATE_DIS;
	I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, val);

	/* WaDisableVFclkgate:cnl */
	/* WaVFUnitClockGatingDisable:cnl */
	val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE);
	val |= VFUNIT_CLKGATE_DIS;
	I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val);
}

static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	cnp_init_clock_gating(dev_priv);
	gen9_init_clock_gating(dev_priv);

	/* WaFbcNukeOnHostModify:cfl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}

static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableGamClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
			   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);

	/* WaFbcNukeOnHostModify:kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}

static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WAC6entrylatency:skl */
	I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
		   FBC_LLC_FULLY_OPEN);

	/* WaFbcNukeOnHostModify:skl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}

static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* The GTT cache must be disabled if the system is using 2M pages. */
	bool can_use_gtt_cache = !HAS_PAGE_SIZES(dev_priv,
						 I915_GTT_PAGE_SIZE_2M);
	enum pipe pipe;

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaProgramL3SqcReg1Default:bdw */
	gen8_set_l3sqc_credits(dev_priv, 30, 2);

	/* WaGttCachingOffByDefault:bdw */
	I915_WRITE(HSW_GTT_CACHE_EN, can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);

	/* WaKVMNotificationOnConfigChange:bdw */
	I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
		   | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);

	lpt_init_clock_gating(dev_priv);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP
	 * clock gating.
	 */
	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}

static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* WaSampleCChickenBitEnable:hsw */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	lpt_init_clock_gating(dev_priv);
}

static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t snpcr;

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
			GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev_priv))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev_priv);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev_priv))
		cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}

static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating- MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}

static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaProgramL3SqcReg1Default:chv
	 * See gfxspecs/Related Documents/Performance Guide/
	 * LSQC Setting Recommendations.
	 */
	gen8_set_l3sqc_credits(dev_priv, 38, 2);

	/*
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}

static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev_priv))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev_priv);
}

static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev_priv))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}

void intel_init_clock_gating(struct drm_i915_private *dev_priv)
{
	dev_priv->display.init_clock_gating(dev_priv);
}

void intel_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT(dev_priv))
		lpt_suspend_hw(dev_priv);
}

static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
9238 9239 9240 9241 9242 9243 9244 9245 9246 9247 9248 9249 9250 9251 9252
{
	DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
}

/**
 * intel_init_clock_gating_hooks - setup the clock gating hooks
 * @dev_priv: device private
 *
 * Setup the hooks that configure which clocks of a given platform can be
 * gated and also apply various GT and display specific workarounds for these
 * platforms. Note that some GT specific workarounds are applied separately
 * when GPU contexts or batchbuffers start their execution.
 */
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
	if (IS_ICELAKE(dev_priv))
		dev_priv->display.init_clock_gating = icl_init_clock_gating;
	else if (IS_CANNONLAKE(dev_priv))
		dev_priv->display.init_clock_gating = cnl_init_clock_gating;
	else if (IS_COFFEELAKE(dev_priv))
		dev_priv->display.init_clock_gating = cfl_init_clock_gating;
	else if (IS_SKYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = skl_init_clock_gating;
	else if (IS_KABYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = kbl_init_clock_gating;
	else if (IS_BROXTON(dev_priv))
		dev_priv->display.init_clock_gating = bxt_init_clock_gating;
	else if (IS_GEMINILAKE(dev_priv))
		dev_priv->display.init_clock_gating = glk_init_clock_gating;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->display.init_clock_gating = bdw_init_clock_gating;
	else if (IS_CHERRYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = chv_init_clock_gating;
	else if (IS_HASWELL(dev_priv))
		dev_priv->display.init_clock_gating = hsw_init_clock_gating;
	else if (IS_IVYBRIDGE(dev_priv))
		dev_priv->display.init_clock_gating = ivb_init_clock_gating;
	else if (IS_VALLEYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = vlv_init_clock_gating;
	else if (IS_GEN6(dev_priv))
		dev_priv->display.init_clock_gating = gen6_init_clock_gating;
	else if (IS_GEN5(dev_priv))
		dev_priv->display.init_clock_gating = ilk_init_clock_gating;
	else if (IS_G4X(dev_priv))
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	else if (IS_I965GM(dev_priv))
		dev_priv->display.init_clock_gating = i965gm_init_clock_gating;
	else if (IS_I965G(dev_priv))
		dev_priv->display.init_clock_gating = i965g_init_clock_gating;
	else if (IS_GEN3(dev_priv))
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	else if (IS_GEN2(dev_priv))
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
	else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
		dev_priv->display.init_clock_gating = nop_init_clock_gating;
	}
}

/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_i915_private *dev_priv)
{
	intel_fbc_init(dev_priv);

	/* For cxsr */
	if (IS_PINEVIEW(dev_priv))
		i915_pineview_get_mem_freq(dev_priv);
	else if (IS_GEN5(dev_priv))
		i915_ironlake_get_mem_freq(dev_priv);

	/* For FIFO watermark updates */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_setup_wm_latency(dev_priv);
		dev_priv->display.initial_watermarks = skl_initial_wm;
		dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
		dev_priv->display.compute_global_watermarks = skl_compute_wm;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_setup_wm_latency(dev_priv);

		if ((IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
			dev_priv->display.compute_intermediate_wm =
				ilk_compute_intermediate_wm;
			dev_priv->display.initial_watermarks =
				ilk_initial_watermarks;
			dev_priv->display.optimize_watermarks =
				ilk_optimize_watermarks;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disable CxSR\n");
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_setup_wm_latency(dev_priv);
		dev_priv->display.compute_pipe_wm = vlv_compute_pipe_wm;
		dev_priv->display.compute_intermediate_wm = vlv_compute_intermediate_wm;
		dev_priv->display.initial_watermarks = vlv_initial_watermarks;
		dev_priv->display.optimize_watermarks = vlv_optimize_watermarks;
		dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo;
	} else if (IS_G4X(dev_priv)) {
		g4x_setup_wm_latency(dev_priv);
		dev_priv->display.compute_pipe_wm = g4x_compute_pipe_wm;
		dev_priv->display.compute_intermediate_wm = g4x_compute_intermediate_wm;
		dev_priv->display.initial_watermarks = g4x_initial_watermarks;
		dev_priv->display.optimize_watermarks = g4x_optimize_watermarks;
	} else if (IS_PINEVIEW(dev_priv)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
	} else if (IS_GEN4(dev_priv)) {
		dev_priv->display.update_wm = i965_update_wm;
	} else if (IS_GEN3(dev_priv)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
	} else if (IS_GEN2(dev_priv)) {
		if (INTEL_INFO(dev_priv)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}

static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
{
	uint32_t flags =
		I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;

	switch (flags) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_UNIMPLEMENTED_CMD:
		return -ENODEV;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	case GEN6_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	default:
		MISSING_CASE(flags);
		return 0;
	}
}

static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
{
	uint32_t flags =
		I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;

	switch (flags) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN7_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	case GEN7_PCODE_ILLEGAL_DATA:
		return -EINVAL;
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	default:
		MISSING_CASE(flags);
		return 0;
	}
}

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
	int status;

	WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));

	/* GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */

	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps\n",
				 mbox, __builtin_return_address(0));
		return -EAGAIN;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, *val);
	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (__intel_wait_for_register_fw(dev_priv,
					 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
					 500, 0, NULL)) {
		DRM_ERROR("timeout waiting for pcode read (from mbox %x) to finish for %ps\n",
			  mbox, __builtin_return_address(0));
		return -ETIMEDOUT;
	}

	*val = I915_READ_FW(GEN6_PCODE_DATA);
	I915_WRITE_FW(GEN6_PCODE_DATA, 0);

	if (INTEL_GEN(dev_priv) > 6)
		status = gen7_check_mailbox_status(dev_priv);
	else
		status = gen6_check_mailbox_status(dev_priv);

	if (status) {
		DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
				 mbox, __builtin_return_address(0), status);
		return status;
	}

	return 0;
}
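
/*
 * A typical use, as seen when reading the RC6 voltage table elsewhere in
 * the driver:
 *
 *	u32 rc6vids = 0;
 *	int ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
 *					 &rc6vids);
 *
 * Note that *val doubles as the request payload on entry and holds the
 * reply on successful return.
 */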

int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
				    u32 mbox, u32 val,
				    int fast_timeout_us, int slow_timeout_ms)
{
	int status;

	WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));

	/* GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */

	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps\n",
				 val, mbox, __builtin_return_address(0));
		return -EAGAIN;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, val);
	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (__intel_wait_for_register_fw(dev_priv,
					 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
					 fast_timeout_us, slow_timeout_ms,
					 NULL)) {
		DRM_ERROR("timeout waiting for pcode write of 0x%08x to mbox %x to finish for %ps\n",
			  val, mbox, __builtin_return_address(0));
		return -ETIMEDOUT;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, 0);

	if (INTEL_GEN(dev_priv) > 6)
		status = gen7_check_mailbox_status(dev_priv);
	else
		status = gen6_check_mailbox_status(dev_priv);

	if (status) {
		DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
				 val, mbox, __builtin_return_address(0), status);
		return status;
	}

	return 0;
}

static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
				  u32 request, u32 reply_mask, u32 reply,
				  u32 *status)
{
	u32 val = request;

	*status = sandybridge_pcode_read(dev_priv, mbox, &val);

	return *status || ((val & reply_mask) == reply);
}

/**
 * skl_pcode_request - send PCODE request until acknowledgment
 * @dev_priv: device private
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms and if this times out for another 50 ms with
 * preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
 * other error as reported by PCODE.
 */
int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms)
{
	u32 status;
	int ret;

	WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));

#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
				   &status)

	/*
	 * Prime the PCODE by doing a request first. Normally it guarantees
	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
	 * _wait_for() doesn't guarantee when its passed condition is evaluated
	 * first, so send the first request explicitly.
	 */
	if (COND) {
		ret = 0;
		goto out;
	}
	ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
	if (!ret)
		goto out;

	/*
	 * The above can time out if the number of requests was low (2 in the
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround retry
	 * the poll with preemption disabled to maximize the number of
9579
	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
9580
	 * account for interrupts that could reduce the number of these
9581 9582
	 * requests, and for any quirks of the PCODE firmware that delays
	 * the request completion.
9583 9584 9585 9586
	 */
	DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
	WARN_ON_ONCE(timeout_base_ms > 3);
	preempt_disable();
9587
	ret = wait_for_atomic(COND, 50);
9588 9589 9590 9591 9592 9593 9594
	preempt_enable();

out:
	return ret ? ret : status;
#undef COND
}
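
/*
 * A representative caller (the cdclk code is one user) looks roughly like:
 *
 *	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
 *				SKL_CDCLK_PREPARE_FOR_CHANGE,
 *				SKL_CDCLK_READY_FOR_CHANGE,
 *				SKL_CDCLK_READY_FOR_CHANGE, 3);
 *
 * i.e. keep resending the "prepare" request until the reply, masked by the
 * "ready" bit, reads back as ready.
 */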

static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
}

static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
}
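
/*
 * Worked example, with an illustrative reference frequency only: taking
 * rps->gpll_ref_freq = 27000 (kHz), opcode 0xc0 gives N = 0xc0 - 0xb7 = 9,
 * i.e. DIV_ROUND_CLOSEST(27000 * 9, 1000) = 243 MHz, and
 * byt_freq_opcode(dev_priv, 243) = DIV_ROUND_CLOSEST(1000 * 243, 27000) +
 * 0xb7 = 9 + 0xb7 = 0xc0 restores the opcode.
 */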

static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
}

static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
}
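
/*
 * Worked example, with an illustrative reference frequency only: taking
 * rps->gpll_ref_freq = 19200 (kHz), the (even) opcode 50 gives
 * DIV_ROUND_CLOSEST(19200 * 50, 4000) = 240 MHz, and
 * chv_freq_opcode(dev_priv, 240) = DIV_ROUND_CLOSEST(2000 * 240, 19200) * 2
 * = 25 * 2 = 50 restores it.
 */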

int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_gpu_freq(dev_priv, val);
	else
		return val * GT_FREQUENCY_MULTIPLIER;
}

int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_freq_opcode(dev_priv, val);
	else
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}
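
/*
 * Sanity example: with GT_FREQUENCY_MULTIPLIER = 50 and GEN9_FREQ_SCALER = 3
 * (their values at the time of writing), a gen9 RP ratio of 18 converts to
 * DIV_ROUND_CLOSEST(18 * 50, 3) = 300 MHz, and intel_freq_opcode() inverts
 * that: DIV_ROUND_CLOSEST(300 * 3, 50) = 18. On older big-core parts the
 * conversion is a plain multiply or divide by 50.
 */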

void intel_pm_setup(struct drm_i915_private *dev_priv)
{
	mutex_init(&dev_priv->pcu_lock);
	mutex_init(&dev_priv->gt_pm.rps.power.mutex);

	atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);

	dev_priv->runtime_pm.suspended = false;
	atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
}

static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
			     const i915_reg_t reg)
{
	u32 lower, upper, tmp;
	int loop = 2;

	/*
	 * The registers accessed do not need forcewake. We borrow
	 * uncore lock to prevent concurrent access to range reg.
	 */
	lockdep_assert_held(&dev_priv->uncore.lock);

	/*
	 * vlv and chv residency counters are 40 bits in width.
	 * With a control bit, we can choose between upper or lower
	 * 32bit window into this counter.
	 *
	 * Although we always use the counter in high-range mode elsewhere,
	 * userspace may attempt to read the value before rc6 is initialised,
	 * before we have set the default VLV_COUNTER_CONTROL value. So always
	 * set the high bit to be safe.
	 */
	I915_WRITE_FW(VLV_COUNTER_CONTROL,
		      _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
	upper = I915_READ_FW(reg);
	do {
		tmp = upper;

		I915_WRITE_FW(VLV_COUNTER_CONTROL,
			      _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
		lower = I915_READ_FW(reg);

		I915_WRITE_FW(VLV_COUNTER_CONTROL,
			      _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
		upper = I915_READ_FW(reg);
	} while (upper != tmp && --loop);

	/*
	 * Everywhere else we always use VLV_COUNTER_CONTROL with the
	 * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
	 * now.
	 */

	return lower | (u64)upper << 8;
}
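
/*
 * Reconstruction example: for a 40-bit counter value of 0xaabbccddee the
 * low window reads lower = 0xbbccddee (bits 31:0) and the high window
 * reads upper = 0xaabbccdd (bits 39:8). The retry loop above guarantees
 * that the 24 overlapping bits agree, so lower | (u64)upper << 8 yields
 * 0xaabbccddee again.
 */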

u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
			   const i915_reg_t reg)
{
	u64 time_hw, prev_hw, overflow_hw;
	unsigned int fw_domains;
	unsigned long flags;
	unsigned int i;
	u32 mul, div;

	if (!HAS_RC6(dev_priv))
		return 0;

	/*
	 * Store previous hw counter values for counter wrap-around handling.
	 *
	 * There are only four interesting registers and they live next to each
	 * other so we can use the relative address, compared to the smallest
	 * one as the index into driver storage.
	 */
	i = (i915_mmio_reg_offset(reg) -
	     i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32);
	if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency)))
		return 0;

	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);

	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		mul = 1000000;
		div = dev_priv->czclk_freq;
		overflow_hw = BIT_ULL(40);
		time_hw = vlv_residency_raw(dev_priv, reg);
	} else {
		/* 833.33ns units on Gen9LP, 1.28us elsewhere. */
		if (IS_GEN9_LP(dev_priv)) {
			mul = 10000;
			div = 12;
		} else {
			mul = 1280;
			div = 1;
		}

		overflow_hw = BIT_ULL(32);
		time_hw = I915_READ_FW(reg);
	}

	/*
	 * Counter wrap handling.
	 *
	 * This relies on a sufficient frequency of queries; if samples are
	 * taken too rarely the counters can still wrap undetected.
	 */
	prev_hw = dev_priv->gt_pm.rc6.prev_hw_residency[i];
	dev_priv->gt_pm.rc6.prev_hw_residency[i] = time_hw;

	/* RC6 delta from last sample. */
	if (time_hw >= prev_hw)
		time_hw -= prev_hw;
	else
		time_hw += overflow_hw - prev_hw;

	/* Add delta to RC6 extended raw driver copy. */
	time_hw += dev_priv->gt_pm.rc6.cur_residency[i];
	dev_priv->gt_pm.rc6.cur_residency[i] = time_hw;

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);

	return mul_u64_u32_div(time_hw, mul, div);
9787
}
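
/*
 * Scaling example: on most big-core parts one hardware tick is 1.28us, so
 * mul = 1280 and div = 1 and mul_u64_u32_div(time_hw, 1280, 1) returns
 * nanoseconds directly; on Gen9LP mul = 10000 and div = 12 express the
 * 833.33ns tick as 10000/12 ns without accumulating rounding error.
 */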

u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat)
{
	u32 cagf;

	if (INTEL_GEN(dev_priv) >= 9)
		cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
	else
		cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;

	return cagf;
}
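
/*
 * Callers typically pair this with intel_gpu_freq() to report the current
 * actual GPU frequency in MHz, along the lines of:
 *
 *	freq = intel_gpu_freq(dev_priv,
 *			      intel_get_cagf(dev_priv,
 *					     I915_READ(GEN6_RPSTAT1)));
 *
 * (as the debugfs and PMU code do).
 */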