intel_cdclk.c
/*
 * Copyright © 2006-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/time.h>

#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_psr.h"
#include "intel_sideband.h"

/**
 * DOC: CDCLK / RAWCLK
 *
 * The display engine uses several different clocks to do its work. There
 * are two main clocks involved that aren't directly related to the actual
 * pixel clock or any symbol/bit clock of the actual output port. These
 * are the core display clock (CDCLK) and RAWCLK.
 *
 * CDCLK clocks most of the display pipe logic, and thus its frequency
 * must be high enough to support the rate at which pixels are flowing
 * through the pipes. Downscaling must also be accounted for as it increases
 * the effective pixel rate.
 *
 * On several platforms the CDCLK frequency can be changed dynamically
 * to minimize power consumption for a given display configuration.
 * Typically changes to the CDCLK frequency require all the display pipes
 * to be shut down while the frequency is being changed.
 *
 * On SKL+ the DMC will toggle the CDCLK off/on during DC5/6 entry/exit.
 * DMC will not change the active CDCLK frequency however, so that part
 * will still be performed by the driver directly.
 *
 * RAWCLK is a fixed frequency clock, often used by various auxiliary
 * blocks such as AUX CH or backlight PWM. Hence the only thing we
 * really need to know about RAWCLK is its frequency so that various
 * dividers can be programmed correctly.
 */
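
/*
 * Illustrative note (added commentary, not from the original documentation):
 * ignoring platform-specific ratios and guardbands, the constraint is roughly
 *
 *   min CDCLK ~ pixel rate * downscale factor
 *
 * i.e. downscaling inflates the effective pixel rate the pipes must sustain;
 * the exact relation between CDCLK and pixel rate is platform dependent.
 */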

void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv,
			   struct intel_cdclk_config *cdclk_config)
{
	dev_priv->cdclk_funcs->get_cdclk(dev_priv, cdclk_config);
}

int intel_cdclk_bw_calc_min_cdclk(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

	return dev_priv->cdclk_funcs->bw_calc_min_cdclk(state);
}

static void intel_cdclk_set_cdclk(struct drm_i915_private *dev_priv,
				  const struct intel_cdclk_config *cdclk_config,
				  enum pipe pipe)
{
	dev_priv->cdclk_funcs->set_cdclk(dev_priv, cdclk_config, pipe);
}

static int intel_cdclk_modeset_calc_cdclk(struct drm_i915_private *dev_priv,
					  struct intel_cdclk_state *cdclk_config)
{
	return dev_priv->cdclk_funcs->modeset_calc_cdclk(cdclk_config);
}

static u8 intel_cdclk_calc_voltage_level(struct drm_i915_private *dev_priv,
					 int cdclk)
{
	return dev_priv->cdclk_funcs->calc_voltage_level(cdclk);
}

static void fixed_133mhz_get_cdclk(struct drm_i915_private *dev_priv,
				   struct intel_cdclk_config *cdclk_config)
{
	cdclk_config->cdclk = 133333;
}

static void fixed_200mhz_get_cdclk(struct drm_i915_private *dev_priv,
				   struct intel_cdclk_config *cdclk_config)
{
	cdclk_config->cdclk = 200000;
}

static void fixed_266mhz_get_cdclk(struct drm_i915_private *dev_priv,
				   struct intel_cdclk_config *cdclk_config)
{
	cdclk_config->cdclk = 266667;
}

static void fixed_333mhz_get_cdclk(struct drm_i915_private *dev_priv,
				   struct intel_cdclk_config *cdclk_config)
{
	cdclk_config->cdclk = 333333;
}

static void fixed_400mhz_get_cdclk(struct drm_i915_private *dev_priv,
				   struct intel_cdclk_config *cdclk_config)
{
	cdclk_config->cdclk = 400000;
}

static void fixed_450mhz_get_cdclk(struct drm_i915_private *dev_priv,
				   struct intel_cdclk_config *cdclk_config)
{
	cdclk_config->cdclk = 450000;
}

static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
			   struct intel_cdclk_config *cdclk_config)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	u16 hpllcc = 0;

	/*
	 * 852GM/852GMV only supports 133 MHz and the HPLLCC
	 * encoding is different :(
	 * FIXME is this the right way to detect 852GM/852GMV?
	 */
	if (pdev->revision == 0x1) {
		cdclk_config->cdclk = 133333;
		return;
	}

	pci_bus_read_config_word(pdev->bus,
				 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);

	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_133_200_2:
	case GC_CLOCK_100_200:
		cdclk_config->cdclk = 200000;
		break;
	case GC_CLOCK_166_250:
		cdclk_config->cdclk = 250000;
		break;
	case GC_CLOCK_100_133:
		cdclk_config->cdclk = 133333;
		break;
	case GC_CLOCK_133_266:
	case GC_CLOCK_133_266_2:
	case GC_CLOCK_166_266:
		cdclk_config->cdclk = 266667;
		break;
	}
}

static void i915gm_get_cdclk(struct drm_i915_private *dev_priv,
			     struct intel_cdclk_config *cdclk_config)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	u16 gcfgc = 0;

	pci_read_config_word(pdev, GCFGC, &gcfgc);

	if (gcfgc & GC_LOW_FREQUENCY_ENABLE) {
		cdclk_config->cdclk = 133333;
		return;
	}

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_333_320_MHZ:
		cdclk_config->cdclk = 333333;
		break;
	default:
	case GC_DISPLAY_CLOCK_190_200_MHZ:
		cdclk_config->cdclk = 190000;
		break;
	}
}

static void i945gm_get_cdclk(struct drm_i915_private *dev_priv,
			     struct intel_cdclk_config *cdclk_config)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	u16 gcfgc = 0;

	pci_read_config_word(pdev, GCFGC, &gcfgc);

	if (gcfgc & GC_LOW_FREQUENCY_ENABLE) {
		cdclk_config->cdclk = 133333;
		return;
	}

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_333_320_MHZ:
		cdclk_config->cdclk = 320000;
		break;
	default:
	case GC_DISPLAY_CLOCK_190_200_MHZ:
		cdclk_config->cdclk = 200000;
		break;
	}
}

static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
{
	static const unsigned int blb_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
		[4] = 6400000,
	};
	static const unsigned int pnv_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
		[4] = 2666667,
	};
	static const unsigned int cl_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 6400000,
		[4] = 3333333,
		[5] = 3566667,
		[6] = 4266667,
	};
	static const unsigned int elk_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
	};
	static const unsigned int ctg_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 6400000,
		[4] = 2666667,
		[5] = 4266667,
	};
	const unsigned int *vco_table;
	unsigned int vco;
	u8 tmp = 0;

	/* FIXME other chipsets? */
	if (IS_GM45(dev_priv))
		vco_table = ctg_vco;
	else if (IS_G45(dev_priv))
		vco_table = elk_vco;
	else if (IS_I965GM(dev_priv))
		vco_table = cl_vco;
	else if (IS_PINEVIEW(dev_priv))
		vco_table = pnv_vco;
	else if (IS_G33(dev_priv))
		vco_table = blb_vco;
	else
		return 0;

	tmp = intel_de_read(dev_priv,
			    IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);

	vco = vco_table[tmp & 0x7];
	if (vco == 0)
		drm_err(&dev_priv->drm, "Bad HPLL VCO (HPLLVCO=0x%02x)\n",
			tmp);
	else
		drm_dbg_kms(&dev_priv->drm, "HPLL VCO %u kHz\n", vco);

	return vco;
}

static void g33_get_cdclk(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_config *cdclk_config)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	static const u8 div_3200[] = { 12, 10,  8,  7, 5, 16 };
	static const u8 div_4000[] = { 14, 12, 10,  8, 6, 20 };
	static const u8 div_4800[] = { 20, 14, 12, 10, 8, 24 };
	static const u8 div_5333[] = { 20, 16, 12, 12, 8, 28 };
	const u8 *div_table;
	unsigned int cdclk_sel;
	u16 tmp = 0;

	cdclk_config->vco = intel_hpll_vco(dev_priv);

	pci_read_config_word(pdev, GCFGC, &tmp);

	cdclk_sel = (tmp >> 4) & 0x7;

	if (cdclk_sel >= ARRAY_SIZE(div_3200))
		goto fail;

	switch (cdclk_config->vco) {
	case 3200000:
		div_table = div_3200;
		break;
	case 4000000:
		div_table = div_4000;
		break;
	case 4800000:
		div_table = div_4800;
		break;
	case 5333333:
		div_table = div_5333;
		break;
	default:
		goto fail;
	}

	cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco,
						div_table[cdclk_sel]);
	return;

fail:
	drm_err(&dev_priv->drm,
		"Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n",
		cdclk_config->vco, tmp);
	cdclk_config->cdclk = 190476;
}

static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_config *cdclk_config)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	u16 gcfgc = 0;

	pci_read_config_word(pdev, GCFGC, &gcfgc);

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
		cdclk_config->cdclk = 266667;
		break;
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
		cdclk_config->cdclk = 333333;
		break;
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
		cdclk_config->cdclk = 444444;
		break;
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
		cdclk_config->cdclk = 200000;
		break;
	default:
		drm_err(&dev_priv->drm,
			"Unknown pnv display core clock 0x%04x\n", gcfgc);
		fallthrough;
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
		cdclk_config->cdclk = 133333;
		break;
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
		cdclk_config->cdclk = 166667;
		break;
	}
}

static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
			     struct intel_cdclk_config *cdclk_config)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	static const u8 div_3200[] = { 16, 10,  8 };
	static const u8 div_4000[] = { 20, 12, 10 };
	static const u8 div_5333[] = { 24, 16, 14 };
	const u8 *div_table;
	unsigned int cdclk_sel;
	u16 tmp = 0;

	cdclk_config->vco = intel_hpll_vco(dev_priv);

	pci_read_config_word(pdev, GCFGC, &tmp);

	cdclk_sel = ((tmp >> 8) & 0x1f) - 1;

	if (cdclk_sel >= ARRAY_SIZE(div_3200))
		goto fail;

	switch (cdclk_config->vco) {
	case 3200000:
		div_table = div_3200;
		break;
	case 4000000:
		div_table = div_4000;
		break;
	case 5333333:
		div_table = div_5333;
		break;
	default:
		goto fail;
	}

	cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco,
						div_table[cdclk_sel]);
	return;

fail:
	drm_err(&dev_priv->drm,
		"Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n",
		cdclk_config->vco, tmp);
	cdclk_config->cdclk = 200000;
}

static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
			   struct intel_cdclk_config *cdclk_config)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	unsigned int cdclk_sel;
	u16 tmp = 0;

	cdclk_config->vco = intel_hpll_vco(dev_priv);

	pci_read_config_word(pdev, GCFGC, &tmp);

	cdclk_sel = (tmp >> 12) & 0x1;

	switch (cdclk_config->vco) {
	case 2666667:
	case 4000000:
	case 5333333:
		cdclk_config->cdclk = cdclk_sel ? 333333 : 222222;
		break;
	case 3200000:
		cdclk_config->cdclk = cdclk_sel ? 320000 : 228571;
		break;
	default:
		drm_err(&dev_priv->drm,
			"Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n",
			cdclk_config->vco, tmp);
		cdclk_config->cdclk = 222222;
		break;
	}
}

static void hsw_get_cdclk(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_config *cdclk_config)
{
	u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL);
	u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;

	if (lcpll & LCPLL_CD_SOURCE_FCLK)
		cdclk_config->cdclk = 800000;
	else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
		cdclk_config->cdclk = 450000;
	else if (freq == LCPLL_CLK_FREQ_450)
		cdclk_config->cdclk = 450000;
	else if (IS_HSW_ULT(dev_priv))
		cdclk_config->cdclk = 337500;
	else
		cdclk_config->cdclk = 540000;
}

static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
{
	int freq_320 = (dev_priv->hpll_freq <<  1) % 320000 != 0 ?
		333333 : 320000;

	/*
	 * We seem to get an unstable or solid color picture at 200MHz.
	 * Not sure what's wrong. For now use 200MHz only when all pipes
	 * are off.
	 */
	if (IS_VALLEYVIEW(dev_priv) && min_cdclk > freq_320)
		return 400000;
	else if (min_cdclk > 266667)
		return freq_320;
	else if (min_cdclk > 0)
		return 266667;
	else
		return 200000;
}

static u8 vlv_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk)
{
	if (IS_VALLEYVIEW(dev_priv)) {
		if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
			return 2;
		else if (cdclk >= 266667)
			return 1;
		else
			return 0;
	} else {
		/*
		 * Specs are full of misinformation, but testing on actual
		 * hardware has shown that we just need to write the desired
		 * CCK divider into the Punit register.
		 */
		return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
	}
}
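
/*
 * Worked example (added commentary, hypothetical numbers): on CHV, with
 * 2 * hpll_freq = 1600000 kHz and a target cdclk of 320000 kHz, the value
 * written to the Punit would be DIV_ROUND_CLOSEST(1600000, 320000) - 1 = 4.
 */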

static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_config *cdclk_config)
{
	u32 val;

	vlv_iosf_sb_get(dev_priv,
			BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));

	cdclk_config->vco = vlv_get_hpll_vco(dev_priv);
	cdclk_config->cdclk = vlv_get_cck_clock(dev_priv, "cdclk",
						CCK_DISPLAY_CLOCK_CONTROL,
						cdclk_config->vco);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);

	vlv_iosf_sb_put(dev_priv,
			BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));

	if (IS_VALLEYVIEW(dev_priv))
		cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK) >>
			DSPFREQGUAR_SHIFT;
	else
		cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK_CHV) >>
			DSPFREQGUAR_SHIFT_CHV;
}

static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
{
	unsigned int credits, default_credits;

	if (IS_CHERRYVIEW(dev_priv))
		default_credits = PFI_CREDIT(12);
	else
		default_credits = PFI_CREDIT(8);

	if (dev_priv->cdclk.hw.cdclk >= dev_priv->czclk_freq) {
		/* CHV suggested value is 31 or 63 */
		if (IS_CHERRYVIEW(dev_priv))
			credits = PFI_CREDIT_63;
		else
			credits = PFI_CREDIT(15);
	} else {
		credits = default_credits;
	}

	/*
	 * WA - write default credits before re-programming
	 * FIXME: should we also set the resend bit here?
	 */
	intel_de_write(dev_priv, GCI_CONTROL,
		       VGA_FAST_MODE_DISABLE | default_credits);

	intel_de_write(dev_priv, GCI_CONTROL,
		       VGA_FAST_MODE_DISABLE | credits | PFI_CREDIT_RESEND);

	/*
	 * FIXME is this guaranteed to clear
	 * immediately or should we poll for it?
	 */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, GCI_CONTROL) & PFI_CREDIT_RESEND);
}

static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_config *cdclk_config,
			  enum pipe pipe)
{
	int cdclk = cdclk_config->cdclk;
	u32 val, cmd = cdclk_config->voltage_level;
	intel_wakeref_t wakeref;

	switch (cdclk) {
	case 400000:
	case 333333:
	case 320000:
	case 266667:
	case 200000:
		break;
	default:
		MISSING_CASE(cdclk);
		return;
	}

	/* There are cases where we can end up here with power domains
	 * off and a CDCLK frequency other than the minimum, like when
	 * issuing a modeset without actually changing any display after
	 * a system suspend.  So grab the display core domain, which covers
	 * the HW blocks needed for the following programming.
	 */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);

	vlv_iosf_sb_get(dev_priv,
			BIT(VLV_IOSF_SB_CCK) |
			BIT(VLV_IOSF_SB_BUNIT) |
			BIT(VLV_IOSF_SB_PUNIT));

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		drm_err(&dev_priv->drm,
			"timed out waiting for CDclk change\n");
	}

	if (cdclk == 400000) {
		u32 divider;

		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1,
					    cdclk) - 1;

		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~CCK_FREQUENCY_VALUES;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
			     50))
			drm_err(&dev_priv->drm,
				"timed out waiting for CDclk change\n");
	}

	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400000)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);

	vlv_iosf_sb_put(dev_priv,
			BIT(VLV_IOSF_SB_CCK) |
			BIT(VLV_IOSF_SB_BUNIT) |
			BIT(VLV_IOSF_SB_PUNIT));

	intel_update_cdclk(dev_priv);

	vlv_program_pfi_credits(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
}

static void chv_set_cdclk(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_config *cdclk_config,
			  enum pipe pipe)
{
	int cdclk = cdclk_config->cdclk;
	u32 val, cmd = cdclk_config->voltage_level;
	intel_wakeref_t wakeref;

	switch (cdclk) {
	case 333333:
	case 320000:
	case 266667:
	case 200000:
		break;
	default:
		MISSING_CASE(cdclk);
		return;
	}

	/* There are cases where we can end up here with power domains
	 * off and a CDCLK frequency other than the minimum, like when
	 * issuing a modeset without actually changing any display after
	 * a system suspend.  So grab the display core domain, which covers
	 * the HW blocks needed for the following programming.
	 */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);

	vlv_punit_get(dev_priv);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	val &= ~DSPFREQGUAR_MASK_CHV;
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
		     50)) {
		drm_err(&dev_priv->drm,
			"timed out waiting for CDclk change\n");
	}

	vlv_punit_put(dev_priv);

	intel_update_cdclk(dev_priv);

	vlv_program_pfi_credits(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
}

static int bdw_calc_cdclk(int min_cdclk)
{
	if (min_cdclk > 540000)
		return 675000;
	else if (min_cdclk > 450000)
		return 540000;
	else if (min_cdclk > 337500)
		return 450000;
	else
		return 337500;
}

static u8 bdw_calc_voltage_level(int cdclk)
{
	switch (cdclk) {
	default:
	case 337500:
		return 2;
	case 450000:
		return 0;
	case 540000:
		return 1;
	case 675000:
		return 3;
	}
}

static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_config *cdclk_config)
{
	u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL);
	u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;

	if (lcpll & LCPLL_CD_SOURCE_FCLK)
		cdclk_config->cdclk = 800000;
	else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
		cdclk_config->cdclk = 450000;
	else if (freq == LCPLL_CLK_FREQ_450)
		cdclk_config->cdclk = 450000;
	else if (freq == LCPLL_CLK_FREQ_54O_BDW)
		cdclk_config->cdclk = 540000;
	else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
		cdclk_config->cdclk = 337500;
	else
		cdclk_config->cdclk = 675000;

	/*
	 * Can't read this out :( Let's assume it's
	 * at least what the CDCLK frequency requires.
	 */
	cdclk_config->voltage_level =
		bdw_calc_voltage_level(cdclk_config->cdclk);
}

static u32 bdw_cdclk_freq_sel(int cdclk)
{
	switch (cdclk) {
	default:
		MISSING_CASE(cdclk);
		fallthrough;
	case 337500:
		return LCPLL_CLK_FREQ_337_5_BDW;
	case 450000:
		return LCPLL_CLK_FREQ_450;
	case 540000:
		return LCPLL_CLK_FREQ_54O_BDW;
	case 675000:
		return LCPLL_CLK_FREQ_675_BDW;
	}
}

static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_config *cdclk_config,
			  enum pipe pipe)
{
	int cdclk = cdclk_config->cdclk;
	int ret;

	if (drm_WARN(&dev_priv->drm,
		     (intel_de_read(dev_priv, LCPLL_CTL) &
		      (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
		       LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
		       LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
		       LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
		     "trying to change cdclk frequency with cdclk not enabled\n"))
		return;

	ret = sandybridge_pcode_write(dev_priv,
				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
	if (ret) {
		drm_err(&dev_priv->drm,
			"failed to inform pcode about cdclk change\n");
		return;
	}

	intel_de_rmw(dev_priv, LCPLL_CTL,
		     0, LCPLL_CD_SOURCE_FCLK);

	/*
	 * According to the spec, it should be enough to poll for this 1 us.
	 * However, extensive testing shows that this can take longer.
	 */
	if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
			LCPLL_CD_SOURCE_FCLK_DONE, 100))
		drm_err(&dev_priv->drm, "Switching to FCLK failed\n");

	intel_de_rmw(dev_priv, LCPLL_CTL,
		     LCPLL_CLK_FREQ_MASK, bdw_cdclk_freq_sel(cdclk));

	intel_de_rmw(dev_priv, LCPLL_CTL,
		     LCPLL_CD_SOURCE_FCLK, 0);

	if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
			 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
		drm_err(&dev_priv->drm, "Switching back to LCPLL failed\n");

	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
				cdclk_config->voltage_level);

	intel_de_write(dev_priv, CDCLK_FREQ,
		       DIV_ROUND_CLOSEST(cdclk, 1000) - 1);

	intel_update_cdclk(dev_priv);
}

static int skl_calc_cdclk(int min_cdclk, int vco)
{
	if (vco == 8640000) {
		if (min_cdclk > 540000)
			return 617143;
		else if (min_cdclk > 432000)
			return 540000;
		else if (min_cdclk > 308571)
			return 432000;
		else
			return 308571;
	} else {
		if (min_cdclk > 540000)
			return 675000;
		else if (min_cdclk > 450000)
			return 540000;
		else if (min_cdclk > 337500)
			return 450000;
		else
			return 337500;
	}
}

static u8 skl_calc_voltage_level(int cdclk)
{
	if (cdclk > 540000)
		return 3;
	else if (cdclk > 450000)
		return 2;
	else if (cdclk > 337500)
		return 1;
	else
		return 0;
}

static void skl_dpll0_update(struct drm_i915_private *dev_priv,
			     struct intel_cdclk_config *cdclk_config)
{
	u32 val;

	cdclk_config->ref = 24000;
	cdclk_config->vco = 0;

	val = intel_de_read(dev_priv, LCPLL1_CTL);
	if ((val & LCPLL_PLL_ENABLE) == 0)
		return;

	if (drm_WARN_ON(&dev_priv->drm, (val & LCPLL_PLL_LOCK) == 0))
		return;

	val = intel_de_read(dev_priv, DPLL_CTRL1);

	if (drm_WARN_ON(&dev_priv->drm,
			(val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
				DPLL_CTRL1_SSC(SKL_DPLL0) |
				DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
			DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
		return;

	switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
		cdclk_config->vco = 8100000;
		break;
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
		cdclk_config->vco = 8640000;
		break;
	default:
		MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
		break;
	}
}

static void skl_get_cdclk(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_config *cdclk_config)
{
	u32 cdctl;

	skl_dpll0_update(dev_priv, cdclk_config);

	cdclk_config->cdclk = cdclk_config->bypass = cdclk_config->ref;

	if (cdclk_config->vco == 0)
		goto out;

	cdctl = intel_de_read(dev_priv, CDCLK_CTL);

	if (cdclk_config->vco == 8640000) {
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			cdclk_config->cdclk = 432000;
			break;
		case CDCLK_FREQ_337_308:
			cdclk_config->cdclk = 308571;
			break;
		case CDCLK_FREQ_540:
			cdclk_config->cdclk = 540000;
			break;
		case CDCLK_FREQ_675_617:
			cdclk_config->cdclk = 617143;
			break;
		default:
			MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
			break;
		}
	} else {
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			cdclk_config->cdclk = 450000;
			break;
		case CDCLK_FREQ_337_308:
			cdclk_config->cdclk = 337500;
			break;
		case CDCLK_FREQ_540:
			cdclk_config->cdclk = 540000;
			break;
		case CDCLK_FREQ_675_617:
			cdclk_config->cdclk = 675000;
			break;
		default:
			MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
			break;
		}
	}

 out:
	/*
	 * Can't read this out :( Let's assume it's
	 * at least what the CDCLK frequency requires.
	 */
	cdclk_config->voltage_level =
		skl_calc_voltage_level(cdclk_config->cdclk);
}

/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
static int skl_cdclk_decimal(int cdclk)
{
	return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
}
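
/*
 * Worked example (added commentary, not from the original source):
 * cdclk = 337500 kHz encodes as DIV_ROUND_CLOSEST(337500 - 1000, 500) = 673,
 * i.e. 336.5 MHz expressed in 0.5 MHz units (one fractional bit).
 */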

static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv,
					int vco)
{
	bool changed = dev_priv->skl_preferred_vco_freq != vco;

	dev_priv->skl_preferred_vco_freq = vco;

	if (changed)
		intel_update_max_cdclk(dev_priv);
}

static u32 skl_dpll0_link_rate(struct drm_i915_private *dev_priv, int vco)
{
	drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000);

	/*
	 * We always enable DPLL0 with the lowest link rate possible, but still
	 * taking into account the VCO required to operate the eDP panel at the
	 * desired frequency. The usual DP link rates operate with a VCO of
	 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
	 * The modeset code is responsible for the selection of the exact link
	 * rate later on, with the constraint of choosing a frequency that
	 * works with vco.
	 */
	if (vco == 8640000)
		return DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0);
	else
		return DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0);
}

static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
{
	intel_de_rmw(dev_priv, DPLL_CTRL1,
		     DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
		     DPLL_CTRL1_SSC(SKL_DPLL0) |
		     DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0),
		     DPLL_CTRL1_OVERRIDE(SKL_DPLL0) |
		     skl_dpll0_link_rate(dev_priv, vco));
	intel_de_posting_read(dev_priv, DPLL_CTRL1);

	intel_de_rmw(dev_priv, LCPLL1_CTL,
		     0, LCPLL_PLL_ENABLE);

	if (intel_de_wait_for_set(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "DPLL0 not locked\n");

	dev_priv->cdclk.hw.vco = vco;

	/* We'll want to keep using the current vco from now on. */
	skl_set_preferred_cdclk_vco(dev_priv, vco);
}

static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
{
	intel_de_rmw(dev_priv, LCPLL1_CTL,
		     LCPLL_PLL_ENABLE, 0);

	if (intel_de_wait_for_clear(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "Couldn't disable DPLL0\n");

	dev_priv->cdclk.hw.vco = 0;
}

static u32 skl_cdclk_freq_sel(struct drm_i915_private *dev_priv,
			      int cdclk, int vco)
{
	switch (cdclk) {
	default:
		drm_WARN_ON(&dev_priv->drm,
			    cdclk != dev_priv->cdclk.hw.bypass);
		drm_WARN_ON(&dev_priv->drm, vco != 0);
		fallthrough;
	case 308571:
	case 337500:
		return CDCLK_FREQ_337_308;
	case 450000:
	case 432000:
		return CDCLK_FREQ_450_432;
	case 540000:
		return CDCLK_FREQ_540;
	case 617143:
	case 675000:
		return CDCLK_FREQ_675_617;
	}
}

static void skl_set_cdclk(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_config *cdclk_config,
			  enum pipe pipe)
{
	int cdclk = cdclk_config->cdclk;
	int vco = cdclk_config->vco;
	u32 freq_select, cdclk_ctl;
	int ret;

	/*
	 * Based on WA#1183, the 308 and 617 MHz CDCLK rates are
	 * unsupported on SKL. In theory this should never happen since only
	 * the eDP1.4 2.16 and 4.32Gbps rates require it, but eDP1.4 is not
	 * supported on SKL either, see the above WA. WARN whenever trying to
	 * use the corresponding VCO freq as that always leads to using the
	 * minimum 308MHz CDCLK.
	 */
	drm_WARN_ON_ONCE(&dev_priv->drm,
			 IS_SKYLAKE(dev_priv) && vco == 8640000);

	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
				SKL_CDCLK_PREPARE_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE, 3);
	if (ret) {
		drm_err(&dev_priv->drm,
			"Failed to inform PCU about cdclk change (%d)\n", ret);
		return;
	}

	freq_select = skl_cdclk_freq_sel(dev_priv, cdclk, vco);

	if (dev_priv->cdclk.hw.vco != 0 &&
	    dev_priv->cdclk.hw.vco != vco)
		skl_dpll0_disable(dev_priv);

	cdclk_ctl = intel_de_read(dev_priv, CDCLK_CTL);

	if (dev_priv->cdclk.hw.vco != vco) {
		/* Wa Display #1183: skl,kbl,cfl */
		cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
		cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
		intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
	}

	/* Wa Display #1183: skl,kbl,cfl */
	cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE;
	intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
	intel_de_posting_read(dev_priv, CDCLK_CTL);

	if (dev_priv->cdclk.hw.vco != vco)
		skl_dpll0_enable(dev_priv, vco);

	/* Wa Display #1183: skl,kbl,cfl */
	cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
	intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);

	cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
	intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);

	/* Wa Display #1183: skl,kbl,cfl */
	cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE;
	intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
	intel_de_posting_read(dev_priv, CDCLK_CTL);

	/* inform PCU of the change */
	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
				cdclk_config->voltage_level);

	intel_update_cdclk(dev_priv);
}

static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
	u32 cdctl, expected;

	/*
	 * check if the pre-os initialized the display
	 * There is SWF18 scratchpad register defined which is set by the
	 * pre-os which can be used by the OS drivers to check the status
	 */
	if ((intel_de_read(dev_priv, SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
		goto sanitize;

	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");

	/* Is PLL enabled and locked ? */
	if (dev_priv->cdclk.hw.vco == 0 ||
	    dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
		goto sanitize;

	/* DPLL okay; verify the cdclock
	 *
	 * Noticed in some instances that the freq selection is correct but
	 * decimal part is programmed wrong from BIOS where pre-os does not
	 * enable display. Verify the same as well.
	 */
	cdctl = intel_de_read(dev_priv, CDCLK_CTL);
	expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
		skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
	if (cdctl == expected)
		/* All well; nothing to sanitize */
		return;

sanitize:
	drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n");

	/* force cdclk programming */
	dev_priv->cdclk.hw.cdclk = 0;
	/* force full PLL disable + enable */
	dev_priv->cdclk.hw.vco = -1;
}

static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_config cdclk_config;

	skl_sanitize_cdclk(dev_priv);

	if (dev_priv->cdclk.hw.cdclk != 0 &&
	    dev_priv->cdclk.hw.vco != 0) {
		/*
		 * Use the current vco as our initial
		 * guess as to what the preferred vco is.
		 */
		if (dev_priv->skl_preferred_vco_freq == 0)
			skl_set_preferred_cdclk_vco(dev_priv,
						    dev_priv->cdclk.hw.vco);
		return;
	}

	cdclk_config = dev_priv->cdclk.hw;

	cdclk_config.vco = dev_priv->skl_preferred_vco_freq;
	if (cdclk_config.vco == 0)
		cdclk_config.vco = 8100000;
	cdclk_config.cdclk = skl_calc_cdclk(0, cdclk_config.vco);
	cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk);

	skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}

static void skl_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw;

	cdclk_config.cdclk = cdclk_config.bypass;
	cdclk_config.vco = 0;
	cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk);

	skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}

static const struct intel_cdclk_vals bxt_cdclk_table[] = {
	{ .refclk = 19200, .cdclk = 144000, .divider = 8, .ratio = 60 },
	{ .refclk = 19200, .cdclk = 288000, .divider = 4, .ratio = 60 },
	{ .refclk = 19200, .cdclk = 384000, .divider = 3, .ratio = 60 },
	{ .refclk = 19200, .cdclk = 576000, .divider = 2, .ratio = 60 },
	{ .refclk = 19200, .cdclk = 624000, .divider = 2, .ratio = 65 },
	{}
};

static const struct intel_cdclk_vals glk_cdclk_table[] = {
	{ .refclk = 19200, .cdclk =  79200, .divider = 8, .ratio = 33 },
	{ .refclk = 19200, .cdclk = 158400, .divider = 4, .ratio = 33 },
	{ .refclk = 19200, .cdclk = 316800, .divider = 2, .ratio = 33 },
	{}
};

static const struct intel_cdclk_vals icl_cdclk_table[] = {
	{ .refclk = 19200, .cdclk = 172800, .divider = 2, .ratio = 18 },
	{ .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
	{ .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
	{ .refclk = 19200, .cdclk = 326400, .divider = 4, .ratio = 68 },
	{ .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
	{ .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },

	{ .refclk = 24000, .cdclk = 180000, .divider = 2, .ratio = 15 },
	{ .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
	{ .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
	{ .refclk = 24000, .cdclk = 324000, .divider = 4, .ratio = 54 },
	{ .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
	{ .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },

	{ .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio =  9 },
	{ .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
	{ .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
	{ .refclk = 38400, .cdclk = 326400, .divider = 4, .ratio = 34 },
	{ .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
	{ .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
	{}
};
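
/*
 * Sanity note (added commentary, not from the original source): each table
 * row satisfies cdclk = refclk * ratio / divider, e.g. for
 * { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 }:
 * 19200 kHz * 32 = 614400 kHz PLL VCO, divided by 2 gives 307200 kHz.
 */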

static const struct intel_cdclk_vals rkl_cdclk_table[] = {
	{ .refclk = 19200, .cdclk = 172800, .divider = 4, .ratio =  36 },
	{ .refclk = 19200, .cdclk = 192000, .divider = 4, .ratio =  40 },
	{ .refclk = 19200, .cdclk = 307200, .divider = 4, .ratio =  64 },
	{ .refclk = 19200, .cdclk = 326400, .divider = 8, .ratio = 136 },
	{ .refclk = 19200, .cdclk = 556800, .divider = 4, .ratio = 116 },
	{ .refclk = 19200, .cdclk = 652800, .divider = 4, .ratio = 136 },

	{ .refclk = 24000, .cdclk = 180000, .divider = 4, .ratio =  30 },
	{ .refclk = 24000, .cdclk = 192000, .divider = 4, .ratio =  32 },
	{ .refclk = 24000, .cdclk = 312000, .divider = 4, .ratio =  52 },
	{ .refclk = 24000, .cdclk = 324000, .divider = 8, .ratio = 108 },
	{ .refclk = 24000, .cdclk = 552000, .divider = 4, .ratio =  92 },
	{ .refclk = 24000, .cdclk = 648000, .divider = 4, .ratio = 108 },

	{ .refclk = 38400, .cdclk = 172800, .divider = 4, .ratio = 18 },
	{ .refclk = 38400, .cdclk = 192000, .divider = 4, .ratio = 20 },
	{ .refclk = 38400, .cdclk = 307200, .divider = 4, .ratio = 32 },
	{ .refclk = 38400, .cdclk = 326400, .divider = 8, .ratio = 68 },
	{ .refclk = 38400, .cdclk = 556800, .divider = 4, .ratio = 58 },
	{ .refclk = 38400, .cdclk = 652800, .divider = 4, .ratio = 68 },
	{}
};

static const struct intel_cdclk_vals adlp_a_step_cdclk_table[] = {
	{ .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
	{ .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
	{ .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },

	{ .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
	{ .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
	{ .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },

	{ .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
	{ .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
	{ .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
	{}
};

static const struct intel_cdclk_vals adlp_cdclk_table[] = {
	{ .refclk = 19200, .cdclk = 172800, .divider = 3, .ratio = 27 },
	{ .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
	{ .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
	{ .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
	{ .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },

	{ .refclk = 24000, .cdclk = 176000, .divider = 3, .ratio = 22 },
	{ .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
	{ .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
	{ .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
	{ .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },

	{ .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 },
	{ .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
	{ .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
	{ .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
	{ .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
	{}
};

static const struct intel_cdclk_vals dg2_cdclk_table[] = {
	{ .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio =  9 },
	{ .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
	{ .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
	{ .refclk = 38400, .cdclk = 326400, .divider = 4, .ratio = 34 },
	{ .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
	{ .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
	{}
};

static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
{
	const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
	int i;

	for (i = 0; table[i].refclk; i++)
		if (table[i].refclk == dev_priv->cdclk.hw.ref &&
		    table[i].cdclk >= min_cdclk)
			return table[i].cdclk;

	drm_WARN(&dev_priv->drm, 1,
		 "Cannot satisfy minimum cdclk %d with refclk %u\n",
		 min_cdclk, dev_priv->cdclk.hw.ref);

	return 0;
}

static int bxt_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
	const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
	int i;

	if (cdclk == dev_priv->cdclk.hw.bypass)
		return 0;

	for (i = 0; table[i].refclk; i++)
		if (table[i].refclk == dev_priv->cdclk.hw.ref &&
		    table[i].cdclk == cdclk)
			return dev_priv->cdclk.hw.ref * table[i].ratio;

	drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n",
		 cdclk, dev_priv->cdclk.hw.ref);

	return 0;
}

static u8 bxt_calc_voltage_level(int cdclk)
{
	return DIV_ROUND_UP(cdclk, 25000);
}
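
/*
 * Worked example (added commentary): a 652800 kHz CDCLK maps to
 * DIV_ROUND_UP(652800, 25000) = 27, i.e. one voltage level per started
 * 25 MHz step.
 */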

static u8 icl_calc_voltage_level(int cdclk)
{
	if (cdclk > 556800)
		return 2;
	else if (cdclk > 312000)
		return 1;
	else
		return 0;
}

static u8 ehl_calc_voltage_level(int cdclk)
{
	if (cdclk > 326400)
		return 3;
	else if (cdclk > 312000)
		return 2;
	else if (cdclk > 180000)
		return 1;
	else
		return 0;
}

static u8 tgl_calc_voltage_level(int cdclk)
{
	if (cdclk > 556800)
		return 3;
	else if (cdclk > 326400)
		return 2;
	else if (cdclk > 312000)
		return 1;
	else
		return 0;
}

static void icl_readout_refclk(struct drm_i915_private *dev_priv,
			       struct intel_cdclk_config *cdclk_config)
{
	u32 dssm = intel_de_read(dev_priv, SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK;

	switch (dssm) {
	default:
		MISSING_CASE(dssm);
		fallthrough;
	case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
		cdclk_config->ref = 24000;
		break;
	case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz:
		cdclk_config->ref = 19200;
		break;
	case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz:
		cdclk_config->ref = 38400;
		break;
	}
}

static void bxt_de_pll_readout(struct drm_i915_private *dev_priv,
			       struct intel_cdclk_config *cdclk_config)
{
	u32 val, ratio;

	if (IS_DG2(dev_priv))
		cdclk_config->ref = 38400;
	else if (DISPLAY_VER(dev_priv) >= 11)
		icl_readout_refclk(dev_priv, cdclk_config);
	else
		cdclk_config->ref = 19200;

	val = intel_de_read(dev_priv, BXT_DE_PLL_ENABLE);
	if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 ||
	    (val & BXT_DE_PLL_LOCK) == 0) {
		/*
		 * CDCLK PLL is disabled, the VCO/ratio doesn't matter, but
		 * setting it to zero is a way to signal that.
		 */
		cdclk_config->vco = 0;
		return;
	}

	/*
	 * DISPLAY_VER >= 11 have the ratio directly in the PLL enable register,
	 * gen9lp had it in a separate PLL control register.
	 */
	if (DISPLAY_VER(dev_priv) >= 11)
		ratio = val & ICL_CDCLK_PLL_RATIO_MASK;
	else
		ratio = intel_de_read(dev_priv, BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;

	cdclk_config->vco = ratio * cdclk_config->ref;
}

static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_config *cdclk_config)
{
	u32 divider;
	int div;

	bxt_de_pll_readout(dev_priv, cdclk_config);

	if (DISPLAY_VER(dev_priv) >= 12)
		cdclk_config->bypass = cdclk_config->ref / 2;
	else if (DISPLAY_VER(dev_priv) >= 11)
		cdclk_config->bypass = 50000;
	else
		cdclk_config->bypass = cdclk_config->ref;

	if (cdclk_config->vco == 0) {
		cdclk_config->cdclk = cdclk_config->bypass;
		goto out;
	}

	divider = intel_de_read(dev_priv, CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;

	switch (divider) {
	case BXT_CDCLK_CD2X_DIV_SEL_1:
		div = 2;
		break;
	case BXT_CDCLK_CD2X_DIV_SEL_1_5:
		div = 3;
		break;
	case BXT_CDCLK_CD2X_DIV_SEL_2:
		div = 4;
		break;
	case BXT_CDCLK_CD2X_DIV_SEL_4:
		div = 8;
		break;
	default:
		MISSING_CASE(divider);
		return;
	}

	cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco, div);

 out:
	/*
	 * Can't read this out :( Let's assume it's
	 * at least what the CDCLK frequency requires.
	 */
	cdclk_config->voltage_level =
		intel_cdclk_calc_voltage_level(dev_priv, cdclk_config->cdclk);
}

static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
{
	intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, 0);

	/* Timeout 200us */
	if (intel_de_wait_for_clear(dev_priv,
				    BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "timeout waiting for DE PLL unlock\n");

	dev_priv->cdclk.hw.vco = 0;
}

static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
{
	int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);

	intel_de_rmw(dev_priv, BXT_DE_PLL_CTL,
		     BXT_DE_PLL_RATIO_MASK, BXT_DE_PLL_RATIO(ratio));

	intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);

	/* Timeout 200us */
	if (intel_de_wait_for_set(dev_priv,
				  BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "timeout waiting for DE PLL lock\n");

	dev_priv->cdclk.hw.vco = vco;
}

static void icl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
{
	intel_de_rmw(dev_priv, BXT_DE_PLL_ENABLE,
		     BXT_DE_PLL_PLL_ENABLE, 0);

	/* Timeout 200us */
	if (intel_de_wait_for_clear(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL unlock\n");

	dev_priv->cdclk.hw.vco = 0;
}

static void icl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
{
	int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
	u32 val;

	val = ICL_CDCLK_PLL_RATIO(ratio);
	intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);

	val |= BXT_DE_PLL_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);

	/* Timeout 200us */
	if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL lock\n");

	dev_priv->cdclk.hw.vco = vco;
}

static void adlp_cdclk_pll_crawl(struct drm_i915_private *dev_priv, int vco)
{
	int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
	u32 val;

	/* Write PLL ratio without disabling */
	val = ICL_CDCLK_PLL_RATIO(ratio) | BXT_DE_PLL_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);

	/* Submit freq change request */
	val |= BXT_DE_PLL_FREQ_REQ;
	intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);

	/* Timeout 200us */
	if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE,
				  BXT_DE_PLL_LOCK | BXT_DE_PLL_FREQ_REQ_ACK, 1))
		DRM_ERROR("timeout waiting for FREQ change request ack\n");

	val &= ~BXT_DE_PLL_FREQ_REQ;
	intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);

	dev_priv->cdclk.hw.vco = vco;
}

static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	if (DISPLAY_VER(dev_priv) >= 12) {
		if (pipe == INVALID_PIPE)
			return TGL_CDCLK_CD2X_PIPE_NONE;
		else
			return TGL_CDCLK_CD2X_PIPE(pipe);
	} else if (DISPLAY_VER(dev_priv) >= 11) {
		if (pipe == INVALID_PIPE)
			return ICL_CDCLK_CD2X_PIPE_NONE;
		else
			return ICL_CDCLK_CD2X_PIPE(pipe);
	} else {
		if (pipe == INVALID_PIPE)
			return BXT_CDCLK_CD2X_PIPE_NONE;
		else
			return BXT_CDCLK_CD2X_PIPE(pipe);
	}
}

static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv,
				  int cdclk, int vco)
{
	/* cdclk = vco / 2 / div{1,1.5,2,4} */
	switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
	default:
		drm_WARN_ON(&dev_priv->drm,
			    cdclk != dev_priv->cdclk.hw.bypass);
		drm_WARN_ON(&dev_priv->drm, vco != 0);
		fallthrough;
	case 2:
		return BXT_CDCLK_CD2X_DIV_SEL_1;
	case 3:
		return BXT_CDCLK_CD2X_DIV_SEL_1_5;
	case 4:
		return BXT_CDCLK_CD2X_DIV_SEL_2;
	case 8:
		return BXT_CDCLK_CD2X_DIV_SEL_4;
	}
}
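
/*
 * Worked example (added commentary): with vco = 614400 kHz and
 * cdclk = 307200 kHz, DIV_ROUND_CLOSEST(614400, 307200) = 2 selects
 * BXT_CDCLK_CD2X_DIV_SEL_1, i.e. cdclk = vco / 2 / 1.
 */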

static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_config *cdclk_config,
			  enum pipe pipe)
{
	int cdclk = cdclk_config->cdclk;
	int vco = cdclk_config->vco;
	u32 val;
	int ret;

	/* Inform power controller of upcoming frequency change. */
	if (DISPLAY_VER(dev_priv) >= 11)
		ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
					SKL_CDCLK_PREPARE_FOR_CHANGE,
					SKL_CDCLK_READY_FOR_CHANGE,
					SKL_CDCLK_READY_FOR_CHANGE, 3);
	else
		/*
		 * BSpec requires us to wait up to 150usec, but that leads to
		 * timeouts; the 2ms used here is based on experiment.
		 */
		ret = sandybridge_pcode_write_timeout(dev_priv,
						      HSW_PCODE_DE_WRITE_FREQ_REQ,
						      0x80000000, 150, 2);

	if (ret) {
		drm_err(&dev_priv->drm,
			"Failed to inform PCU about cdclk change (err %d, freq %d)\n",
			ret, cdclk);
		return;
	}

	if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->cdclk.hw.vco > 0 && vco > 0) {
		if (dev_priv->cdclk.hw.vco != vco)
			adlp_cdclk_pll_crawl(dev_priv, vco);
	} else if (DISPLAY_VER(dev_priv) >= 11) {
		if (dev_priv->cdclk.hw.vco != 0 &&
		    dev_priv->cdclk.hw.vco != vco)
			icl_cdclk_pll_disable(dev_priv);

		if (dev_priv->cdclk.hw.vco != vco)
			icl_cdclk_pll_enable(dev_priv, vco);
	} else {
		if (dev_priv->cdclk.hw.vco != 0 &&
		    dev_priv->cdclk.hw.vco != vco)
			bxt_de_pll_disable(dev_priv);

		if (dev_priv->cdclk.hw.vco != vco)
			bxt_de_pll_enable(dev_priv, vco);
	}

	val = bxt_cdclk_cd2x_div_sel(dev_priv, cdclk, vco) |
		bxt_cdclk_cd2x_pipe(dev_priv, pipe) |
		skl_cdclk_decimal(cdclk);

	/*
	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
	 * enable otherwise.
	 */
	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
	    cdclk >= 500000)
		val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
	intel_de_write(dev_priv, CDCLK_CTL, val);

	if (pipe != INVALID_PIPE)
		intel_wait_for_vblank(dev_priv, pipe);

	if (DISPLAY_VER(dev_priv) >= 11) {
		ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
					      cdclk_config->voltage_level);
	} else {
		/*
		 * The timeout isn't specified, the 2ms used here is based on
		 * experiment.
		 * FIXME: Waiting for the request completion could be delayed
		 * until the next PCODE request based on BSpec.
		 */
		ret = sandybridge_pcode_write_timeout(dev_priv,
						      HSW_PCODE_DE_WRITE_FREQ_REQ,
						      cdclk_config->voltage_level,
						      150, 2);
	}

	if (ret) {
		drm_err(&dev_priv->drm,
			"PCode CDCLK freq set failed, (err %d, freq %d)\n",
			ret, cdclk);
		return;
	}

	intel_update_cdclk(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 11)
		/*
		 * Can't read out the voltage level :(
		 * Let's just assume everything is as expected.
		 */
		dev_priv->cdclk.hw.voltage_level = cdclk_config->voltage_level;
}

static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
	u32 cdctl, expected;
	int cdclk, vco;

	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");

	if (dev_priv->cdclk.hw.vco == 0 ||
	    dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
		goto sanitize;

	/* DPLL okay; verify the cdclock
	 *
	 * Some BIOS versions leave an incorrect decimal frequency value and
	 * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
	 * so sanitize this register.
	 */
	cdctl = intel_de_read(dev_priv, CDCLK_CTL);
	/*
	 * Let's ignore the pipe field, since BIOS could have configured the
	 * dividers both synching to an active pipe, or asynchronously
	 * (PIPE_NONE).
	 */
	cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);

	/* Make sure this is a legal cdclk value for the platform */
	cdclk = bxt_calc_cdclk(dev_priv, dev_priv->cdclk.hw.cdclk);
	if (cdclk != dev_priv->cdclk.hw.cdclk)
		goto sanitize;

	/* Make sure the VCO is correct for the cdclk */
	vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
	if (vco != dev_priv->cdclk.hw.vco)
		goto sanitize;

	expected = skl_cdclk_decimal(cdclk);

	/* Figure out what CD2X divider we should be using for this cdclk */
	expected |= bxt_cdclk_cd2x_div_sel(dev_priv,
					   dev_priv->cdclk.hw.cdclk,
					   dev_priv->cdclk.hw.vco);

	/*
	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
	 * enable otherwise.
	 */
	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
	    dev_priv->cdclk.hw.cdclk >= 500000)
		expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;

	if (cdctl == expected)
		/* All well; nothing to sanitize */
		return;

sanitize:
	drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n");

	/* force cdclk programming */
	dev_priv->cdclk.hw.cdclk = 0;

	/* force full PLL disable + enable */
	dev_priv->cdclk.hw.vco = -1;
}

static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_config cdclk_config;

	bxt_sanitize_cdclk(dev_priv);

	if (dev_priv->cdclk.hw.cdclk != 0 &&
	    dev_priv->cdclk.hw.vco != 0)
		return;

	cdclk_config = dev_priv->cdclk.hw;

	/*
	 * FIXME:
	 * - The initial CDCLK needs to be read from VBT.
	 *   Need to make this change after VBT has changes for BXT.
	 */
	cdclk_config.cdclk = bxt_calc_cdclk(dev_priv, 0);
	cdclk_config.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_config.cdclk);
	cdclk_config.voltage_level =
		intel_cdclk_calc_voltage_level(dev_priv, cdclk_config.cdclk);

	bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}

static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw;

	cdclk_config.cdclk = cdclk_config.bypass;
	cdclk_config.vco = 0;
	cdclk_config.voltage_level =
		intel_cdclk_calc_voltage_level(dev_priv, cdclk_config.cdclk);

	bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}

/**
 * intel_cdclk_init_hw - Initialize CDCLK hardware
 * @i915: i915 device
 *
 * Initialize CDCLK. This consists mainly of initializing dev_priv->cdclk.hw and
 * sanitizing the state of the hardware if needed. This is generally done only
 * during the display core initialization sequence, after which the DMC will
 * take care of turning CDCLK off/on as needed.
 */
void intel_cdclk_init_hw(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 10 || IS_BROXTON(i915))
		bxt_cdclk_init_hw(i915);
	else if (DISPLAY_VER(i915) == 9)
		skl_cdclk_init_hw(i915);
}

/**
 * intel_cdclk_uninit_hw - Uninitialize CDCLK hardware
 * @i915: i915 device
 *
 * Uninitialize CDCLK. This is done only during the display core
 * uninitialization sequence.
 */
void intel_cdclk_uninit_hw(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 10 || IS_BROXTON(i915))
		bxt_cdclk_uninit_hw(i915);
	else if (DISPLAY_VER(i915) == 9)
		skl_cdclk_uninit_hw(i915);
}

static bool intel_cdclk_can_crawl(struct drm_i915_private *dev_priv,
				  const struct intel_cdclk_config *a,
				  const struct intel_cdclk_config *b)
{
	int a_div, b_div;

	if (!HAS_CDCLK_CRAWL(dev_priv))
		return false;

	/*
	 * The vco and cd2x divider will change independently
	 * from each other, so we disallow cd2x change when crawling.
	 */
	a_div = DIV_ROUND_CLOSEST(a->vco, a->cdclk);
	b_div = DIV_ROUND_CLOSEST(b->vco, b->cdclk);

	return a->vco != 0 && b->vco != 0 &&
		a->vco != b->vco &&
		a_div == b_div &&
		a->ref == b->ref;
}

/**
 * intel_cdclk_needs_modeset - Determine if changing between the CDCLK
 *                             configurations requires a modeset on all pipes
 * @a: first CDCLK configuration
 * @b: second CDCLK configuration
 *
 * Returns:
 * True if changing between the two CDCLK configurations
 * requires all pipes to be off, false if not.
 */
bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a,
			       const struct intel_cdclk_config *b)
{
	return a->cdclk != b->cdclk ||
		a->vco != b->vco ||
		a->ref != b->ref;
}

/**
 * intel_cdclk_can_cd2x_update - Determine if changing between the two CDCLK
 *                               configurations requires only a cd2x divider update
 * @dev_priv: i915 device
 * @a: first CDCLK configuration
 * @b: second CDCLK configuration
 *
 * Returns:
 * True if changing between the two CDCLK configurations
 * can be done with just a cd2x divider update, false if not.
 */
static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv,
					const struct intel_cdclk_config *a,
					const struct intel_cdclk_config *b)
{
	/* Older hw doesn't have the capability */
	if (DISPLAY_VER(dev_priv) < 10 && !IS_BROXTON(dev_priv))
		return false;

	return a->cdclk != b->cdclk &&
		a->vco == b->vco &&
		a->ref == b->ref;
}

/**
 * intel_cdclk_changed - Determine if two CDCLK configurations are different
 * @a: first CDCLK configuration
 * @b: second CDCLK configuration
 *
 * Returns:
 * True if the CDCLK configurations don't match, false if they do.
 */
static bool intel_cdclk_changed(const struct intel_cdclk_config *a,
				const struct intel_cdclk_config *b)
{
	return intel_cdclk_needs_modeset(a, b) ||
		a->voltage_level != b->voltage_level;
}

void intel_dump_cdclk_config(const struct intel_cdclk_config *cdclk_config,
			     const char *context)
{
	DRM_DEBUG_DRIVER("%s %d kHz, VCO %d kHz, ref %d kHz, bypass %d kHz, voltage level %d\n",
			 context, cdclk_config->cdclk, cdclk_config->vco,
			 cdclk_config->ref, cdclk_config->bypass,
			 cdclk_config->voltage_level);
}

/**
 * intel_set_cdclk - Push the CDCLK configuration to the hardware
 * @dev_priv: i915 device
 * @cdclk_config: new CDCLK configuration
 * @pipe: pipe with which to synchronize the update
 *
 * Program the hardware based on the passed in CDCLK state,
 * if necessary.
 */
static void intel_set_cdclk(struct drm_i915_private *dev_priv,
			    const struct intel_cdclk_config *cdclk_config,
			    enum pipe pipe)
{
	struct intel_encoder *encoder;

	if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config))
		return;

	if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->cdclk_funcs->set_cdclk))
		return;

	intel_dump_cdclk_config(cdclk_config, "Changing CDCLK to");

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		intel_psr_pause(intel_dp);
	}

	/*
	 * Lock aux/gmbus while we change cdclk in case those
	 * functions use cdclk. Not all platforms/ports do,
	 * but we'll lock them all for simplicity.
	 */
	mutex_lock(&dev_priv->gmbus_mutex);
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock_nest_lock(&intel_dp->aux.hw_mutex,
				     &dev_priv->gmbus_mutex);
	}

	intel_cdclk_set_cdclk(dev_priv, cdclk_config, pipe);

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_unlock(&intel_dp->aux.hw_mutex);
	}
	mutex_unlock(&dev_priv->gmbus_mutex);

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		intel_psr_resume(intel_dp);
	}

	if (drm_WARN(&dev_priv->drm,
		     intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config),
		     "cdclk state doesn't match!\n")) {
		intel_dump_cdclk_config(&dev_priv->cdclk.hw, "[hw state]");
		intel_dump_cdclk_config(cdclk_config, "[sw state]");
	}
}

/**
 * intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware
 * @state: intel atomic state
 *
 * Program the hardware before updating the HW plane state based on the
 * new CDCLK state, if necessary.
 */
void
intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_cdclk_state *old_cdclk_state =
		intel_atomic_get_old_cdclk_state(state);
	const struct intel_cdclk_state *new_cdclk_state =
		intel_atomic_get_new_cdclk_state(state);
	enum pipe pipe = new_cdclk_state->pipe;

	if (!intel_cdclk_changed(&old_cdclk_state->actual,
				 &new_cdclk_state->actual))
		return;

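	/*
	 * Frequency increases (and updates not tied to any pipe) are applied
	 * here, before the plane update; decreases are left to
	 * intel_set_cdclk_post_plane_update().
	 */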
	if (pipe == INVALID_PIPE ||
	    old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
		drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed);

		intel_set_cdclk(dev_priv, &new_cdclk_state->actual, pipe);
	}
}

/**
 * intel_set_cdclk_post_plane_update - Push the CDCLK state to the hardware
 * @state: intel atomic state
 *
 * Program the hardware after updating the HW plane state based on the
 * new CDCLK state, if necessary.
 */
void
intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_cdclk_state *old_cdclk_state =
		intel_atomic_get_old_cdclk_state(state);
	const struct intel_cdclk_state *new_cdclk_state =
		intel_atomic_get_new_cdclk_state(state);
	enum pipe pipe = new_cdclk_state->pipe;

	if (!intel_cdclk_changed(&old_cdclk_state->actual,
				 &new_cdclk_state->actual))
		return;

	if (pipe != INVALID_PIPE &&
	    old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
		drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed);

		intel_set_cdclk(dev_priv, &new_cdclk_state->actual, pipe);
	}
}

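/*
 * Translate a pixel rate into the minimum cdclk that can sustain it: gen10+
 * pushes two pixels per cdclk, SKL/BDW/HSW one pixel per cdclk, while older
 * platforms keep the pixel rate below 95% (CHV) or 90% of cdclk, with
 * double wide pipes handling two pixels per cdclk.
 */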
static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int pixel_rate = crtc_state->pixel_rate;

	if (DISPLAY_VER(dev_priv) >= 10)
		return DIV_ROUND_UP(pixel_rate, 2);
	else if (DISPLAY_VER(dev_priv) == 9 ||
		 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		return pixel_rate;
	else if (IS_CHERRYVIEW(dev_priv))
		return DIV_ROUND_UP(pixel_rate * 100, 95);
	else if (crtc_state->double_wide)
		return DIV_ROUND_UP(pixel_rate * 100, 90 * 2);
	else
		return DIV_ROUND_UP(pixel_rate * 100, 90);
}

static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;
	int min_cdclk = 0;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		min_cdclk = max(crtc_state->min_cdclk[plane->id], min_cdclk);

	return min_cdclk;
}

int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	int min_cdclk;

	if (!crtc_state->hw.enable)
		return 0;

	min_cdclk = intel_pixel_rate_to_cdclk(crtc_state);

	/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
	if (IS_BROADWELL(dev_priv) && hsw_crtc_state_ips_capable(crtc_state))
		min_cdclk = DIV_ROUND_UP(min_cdclk * 100, 95);

	/* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz,
	 * audio enabled, port width x4, and link rate HBR2 (5.4 GHz), or else
	 * there may be audio corruption or screen corruption." This cdclk
	 * restriction for GLK is 316.8 MHz.
	 */
	if (intel_crtc_has_dp_encoder(crtc_state) &&
	    crtc_state->has_audio &&
	    crtc_state->port_clock >= 540000 &&
	    crtc_state->lane_count == 4) {
		if (DISPLAY_VER(dev_priv) == 10) {
			/* Display WA #1145: glk */
			min_cdclk = max(316800, min_cdclk);
		} else if (DISPLAY_VER(dev_priv) == 9 || IS_BROADWELL(dev_priv)) {
			/* Display WA #1144: skl,bxt */
			min_cdclk = max(432000, min_cdclk);
		}
	}

	/*
	 * According to BSpec, "The CD clock frequency must be at least twice
	 * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
	 */
	if (crtc_state->has_audio && DISPLAY_VER(dev_priv) >= 9)
		min_cdclk = max(2 * 96000, min_cdclk);

	/*
	 * "For DP audio configuration, cdclk frequency shall be set to
	 *  meet the following requirements:
	 *  DP Link Frequency(MHz) | Cdclk frequency(MHz)
	 *  270                    | 320 or higher
	 *  162                    | 200 or higher"
	 */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio)
		min_cdclk = max(crtc_state->port_clock, min_cdclk);

	/*
	 * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
	 * than 320000KHz.
	 */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
	    IS_VALLEYVIEW(dev_priv))
		min_cdclk = max(320000, min_cdclk);

	/*
	 * On Geminilake once the CDCLK gets as low as 79200
	 * picture gets unstable, despite that values are
	 * correct for DSI PLL and DE PLL.
	 */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
	    IS_GEMINILAKE(dev_priv))
		min_cdclk = max(158400, min_cdclk);

	/* Account for additional needs from the planes */
	min_cdclk = max(intel_planes_min_cdclk(crtc_state), min_cdclk);

	/*
	 * When we decide to use only one VDSC engine, since
	 * each VDSC operates with 1 ppc throughput, pixel clock
	 * cannot be higher than the VDSC clock (cdclk)
	 */
	if (crtc_state->dsc.compression_enable && !crtc_state->dsc.dsc_split)
		min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate);

	/*
	 * HACK. Currently for TGL platforms we calculate
	 * min_cdclk initially based on pixel_rate divided
	 * by 2, also accounting for plane requirements,
	 * however in some cases the lowest possible CDCLK
	 * doesn't work and causes underruns.
	 * Explicitly stating here that this seems to be currently
	 * rather a hack than a final solution.
	 */
	if (IS_TIGERLAKE(dev_priv)) {
		/*
		 * Clamp to max_cdclk_freq in case the pixel rate is higher,
		 * in order not to break 8K modes, but still leave the W/A in place.
		 */
		min_cdclk = max_t(int, min_cdclk,
				  min_t(int, crtc_state->pixel_rate,
					dev_priv->max_cdclk_freq));
	}

	if (min_cdclk > dev_priv->max_cdclk_freq) {
		drm_dbg_kms(&dev_priv->drm,
			    "required cdclk (%d kHz) exceeds max (%d kHz)\n",
			    min_cdclk, dev_priv->max_cdclk_freq);
		return -EINVAL;
	}

	return min_cdclk;
}

static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
{
	struct intel_atomic_state *state = cdclk_state->base.state;
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_bw_state *bw_state = NULL;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	int min_cdclk, i;
	enum pipe pipe;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		int ret;

		min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
		if (min_cdclk < 0)
			return min_cdclk;

		bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(bw_state))
			return PTR_ERR(bw_state);

		if (cdclk_state->min_cdclk[crtc->pipe] == min_cdclk)
			continue;

		cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;

		ret = intel_atomic_lock_global_state(&cdclk_state->base);
		if (ret)
			return ret;
	}

	min_cdclk = cdclk_state->force_min_cdclk;
	for_each_pipe(dev_priv, pipe) {
		min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);

		if (!bw_state)
			continue;

		min_cdclk = max(bw_state->min_cdclk, min_cdclk);
	}

	return min_cdclk;
}

/*
 * Account for port clock min voltage level requirements.
 * This only really does something on DISPLAY_VER >= 11 but can be
 * called on earlier platforms as well.
 *
 * Note that this function assumes that 0 is
 * the lowest voltage value, and higher values
 * correspond to increasingly higher voltages.
 *
 * Should that relationship no longer hold on
 * future platforms this code will need to be
 * adjusted.
 */
static int bxt_compute_min_voltage_level(struct intel_cdclk_state *cdclk_state)
{
	struct intel_atomic_state *state = cdclk_state->base.state;
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	u8 min_voltage_level;
	int i;
	enum pipe pipe;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		int ret;

		if (crtc_state->hw.enable)
			min_voltage_level = crtc_state->min_voltage_level;
		else
			min_voltage_level = 0;

		if (cdclk_state->min_voltage_level[crtc->pipe] == min_voltage_level)
			continue;

		cdclk_state->min_voltage_level[crtc->pipe] = min_voltage_level;

		ret = intel_atomic_lock_global_state(&cdclk_state->base);
		if (ret)
			return ret;
	}

	min_voltage_level = 0;
	for_each_pipe(dev_priv, pipe)
		min_voltage_level = max(cdclk_state->min_voltage_level[pipe],
					min_voltage_level);

	return min_voltage_level;
}

static int vlv_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
{
	struct intel_atomic_state *state = cdclk_state->base.state;
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	int min_cdclk, cdclk;

	min_cdclk = intel_compute_min_cdclk(cdclk_state);
	if (min_cdclk < 0)
		return min_cdclk;

	cdclk = vlv_calc_cdclk(dev_priv, min_cdclk);

	cdclk_state->logical.cdclk = cdclk;
	cdclk_state->logical.voltage_level =
		vlv_calc_voltage_level(dev_priv, cdclk);

	if (!cdclk_state->active_pipes) {
		cdclk = vlv_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk);

		cdclk_state->actual.cdclk = cdclk;
		cdclk_state->actual.voltage_level =
			vlv_calc_voltage_level(dev_priv, cdclk);
	} else {
		cdclk_state->actual = cdclk_state->logical;
	}

	return 0;
}

static int bdw_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
{
	int min_cdclk, cdclk;

	min_cdclk = intel_compute_min_cdclk(cdclk_state);
	if (min_cdclk < 0)
		return min_cdclk;

	/*
	 * FIXME should also account for plane ratio
	 * once 64bpp pixel formats are supported.
	 */
	cdclk = bdw_calc_cdclk(min_cdclk);

	cdclk_state->logical.cdclk = cdclk;
	cdclk_state->logical.voltage_level =
		bdw_calc_voltage_level(cdclk);

	if (!cdclk_state->active_pipes) {
		cdclk = bdw_calc_cdclk(cdclk_state->force_min_cdclk);

		cdclk_state->actual.cdclk = cdclk;
		cdclk_state->actual.voltage_level =
			bdw_calc_voltage_level(cdclk);
	} else {
		cdclk_state->actual = cdclk_state->logical;
	}

	return 0;
}

static int skl_dpll0_vco(struct intel_cdclk_state *cdclk_state)
{
	struct intel_atomic_state *state = cdclk_state->base.state;
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	int vco, i;

	vco = cdclk_state->logical.vco;
	if (!vco)
		vco = dev_priv->skl_preferred_vco_freq;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.enable)
			continue;

		if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
			continue;

		/*
		 * DPLL0 VCO may need to be adjusted to get the correct
		 * clock for eDP. This will affect cdclk as well.
		 */
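		/*
		 * Port clocks of 216 and 432 MHz (eDP intermediate link
		 * rates) divide evenly only from the 8640 MHz VCO.
		 */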
		switch (crtc_state->port_clock / 2) {
		case 108000:
		case 216000:
			vco = 8640000;
			break;
		default:
			vco = 8100000;
			break;
		}
	}

	return vco;
}

static int skl_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
{
	int min_cdclk, cdclk, vco;

	min_cdclk = intel_compute_min_cdclk(cdclk_state);
	if (min_cdclk < 0)
		return min_cdclk;

	vco = skl_dpll0_vco(cdclk_state);

	/*
	 * FIXME should also account for plane ratio
	 * once 64bpp pixel formats are supported.
	 */
	cdclk = skl_calc_cdclk(min_cdclk, vco);

	cdclk_state->logical.vco = vco;
	cdclk_state->logical.cdclk = cdclk;
	cdclk_state->logical.voltage_level =
		skl_calc_voltage_level(cdclk);

	if (!cdclk_state->active_pipes) {
		cdclk = skl_calc_cdclk(cdclk_state->force_min_cdclk, vco);

		cdclk_state->actual.vco = vco;
		cdclk_state->actual.cdclk = cdclk;
		cdclk_state->actual.voltage_level =
			skl_calc_voltage_level(cdclk);
	} else {
		cdclk_state->actual = cdclk_state->logical;
	}

	return 0;
}

static int bxt_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
{
	struct intel_atomic_state *state = cdclk_state->base.state;
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	int min_cdclk, min_voltage_level, cdclk, vco;

	min_cdclk = intel_compute_min_cdclk(cdclk_state);
	if (min_cdclk < 0)
		return min_cdclk;

	min_voltage_level = bxt_compute_min_voltage_level(cdclk_state);
	if (min_voltage_level < 0)
		return min_voltage_level;

	cdclk = bxt_calc_cdclk(dev_priv, min_cdclk);
	vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);

	cdclk_state->logical.vco = vco;
	cdclk_state->logical.cdclk = cdclk;
	cdclk_state->logical.voltage_level =
		max_t(int, min_voltage_level,
		      intel_cdclk_calc_voltage_level(dev_priv, cdclk));

	if (!cdclk_state->active_pipes) {
		cdclk = bxt_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk);
		vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);

		cdclk_state->actual.vco = vco;
		cdclk_state->actual.cdclk = cdclk;
		cdclk_state->actual.voltage_level =
			intel_cdclk_calc_voltage_level(dev_priv, cdclk);
	} else {
		cdclk_state->actual = cdclk_state->logical;
	}

	return 0;
}

static int fixed_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
{
	int min_cdclk;

	/*
	 * We can't change the cdclk frequency, but we still want to
	 * check that the required minimum frequency doesn't exceed
	 * the actual cdclk frequency.
	 */
	min_cdclk = intel_compute_min_cdclk(cdclk_state);
	if (min_cdclk < 0)
		return min_cdclk;

	return 0;
}

static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_global_obj *obj)
{
	struct intel_cdclk_state *cdclk_state;

	cdclk_state = kmemdup(obj->state, sizeof(*cdclk_state), GFP_KERNEL);
	if (!cdclk_state)
		return NULL;

	cdclk_state->pipe = INVALID_PIPE;

	return &cdclk_state->base;
}

static void intel_cdclk_destroy_state(struct intel_global_obj *obj,
				      struct intel_global_state *state)
{
	kfree(state);
}

static const struct intel_global_state_funcs intel_cdclk_funcs = {
	.atomic_duplicate_state = intel_cdclk_duplicate_state,
	.atomic_destroy_state = intel_cdclk_destroy_state,
};

struct intel_cdclk_state *
intel_atomic_get_cdclk_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_global_state *cdclk_state;

	cdclk_state = intel_atomic_get_global_obj_state(state, &dev_priv->cdclk.obj);
	if (IS_ERR(cdclk_state))
		return ERR_CAST(cdclk_state);

	return to_intel_cdclk_state(cdclk_state);
}

int intel_cdclk_init(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_state *cdclk_state;

	cdclk_state = kzalloc(sizeof(*cdclk_state), GFP_KERNEL);
	if (!cdclk_state)
		return -ENOMEM;

	intel_atomic_global_obj_init(dev_priv, &dev_priv->cdclk.obj,
				     &cdclk_state->base, &intel_cdclk_funcs);

	return 0;
}

int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_cdclk_state *old_cdclk_state;
	struct intel_cdclk_state *new_cdclk_state;
	enum pipe pipe = INVALID_PIPE;
	int ret;

	new_cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(new_cdclk_state))
		return PTR_ERR(new_cdclk_state);

	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);

	new_cdclk_state->active_pipes =
		intel_calc_active_pipes(state, old_cdclk_state->active_pipes);

	ret = intel_cdclk_modeset_calc_cdclk(dev_priv, new_cdclk_state);
	if (ret)
		return ret;

	if (intel_cdclk_changed(&old_cdclk_state->actual,
				&new_cdclk_state->actual)) {
		/*
		 * Also serialize commits across all crtcs
		 * if the actual hw needs to be poked.
		 */
		ret = intel_atomic_serialize_global_state(&new_cdclk_state->base);
		if (ret)
			return ret;
	} else if (old_cdclk_state->active_pipes != new_cdclk_state->active_pipes ||
		   old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk ||
		   intel_cdclk_changed(&old_cdclk_state->logical,
				       &new_cdclk_state->logical)) {
		ret = intel_atomic_lock_global_state(&new_cdclk_state->base);
		if (ret)
			return ret;
	} else {
		return 0;
	}

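	/*
	 * active_pipes is a bitmask, so a power-of-2 value means exactly one
	 * pipe is active; only then can the cd2x update be synchronized with
	 * that pipe's vblank.
	 */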
	if (is_power_of_2(new_cdclk_state->active_pipes) &&
	    intel_cdclk_can_cd2x_update(dev_priv,
					&old_cdclk_state->actual,
					&new_cdclk_state->actual)) {
		struct intel_crtc *crtc;
		struct intel_crtc_state *crtc_state;

		pipe = ilog2(new_cdclk_state->active_pipes);
		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
			pipe = INVALID_PIPE;
	}

	if (intel_cdclk_can_crawl(dev_priv,
				  &old_cdclk_state->actual,
				  &new_cdclk_state->actual)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Can change cdclk via crawl\n");
	} else if (pipe != INVALID_PIPE) {
		new_cdclk_state->pipe = pipe;

		drm_dbg_kms(&dev_priv->drm,
			    "Can change cdclk cd2x divider with pipe %c active\n",
			    pipe_name(pipe));
	} else if (intel_cdclk_needs_modeset(&old_cdclk_state->actual,
					     &new_cdclk_state->actual)) {
		/* All pipes must be switched off while we change the cdclk. */
		ret = intel_modeset_all_pipes(state);
		if (ret)
			return ret;

		drm_dbg_kms(&dev_priv->drm,
			    "Modeset required for cdclk change\n");
	}

	drm_dbg_kms(&dev_priv->drm,
		    "New cdclk calculated to be logical %u kHz, actual %u kHz\n",
		    new_cdclk_state->logical.cdclk,
		    new_cdclk_state->actual.cdclk);
	drm_dbg_kms(&dev_priv->drm,
		    "New voltage level calculated to be logical %u, actual %u\n",
		    new_cdclk_state->logical.voltage_level,
		    new_cdclk_state->actual.voltage_level);

	return 0;
}

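/*
 * The scaling factors below are the inverse of the ones used by
 * intel_pixel_rate_to_cdclk() when deriving the minimum cdclk.
 */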
static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
	int max_cdclk_freq = dev_priv->max_cdclk_freq;

	if (DISPLAY_VER(dev_priv) >= 10)
		return 2 * max_cdclk_freq;
	else if (DISPLAY_VER(dev_priv) == 9 ||
		 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		return max_cdclk_freq;
	else if (IS_CHERRYVIEW(dev_priv))
		return max_cdclk_freq*95/100;
	else if (DISPLAY_VER(dev_priv) < 4)
		return 2*max_cdclk_freq*90/100;
	else
		return max_cdclk_freq*90/100;
}

/**
 * intel_update_max_cdclk - Determine the maximum supported CDCLK frequency
 * @dev_priv: i915 device
 *
 * Determine the maximum CDCLK frequency the platform supports, and also
 * derive the maximum dot clock frequency the maximum CDCLK frequency
 * allows.
 */
void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
{
	if (IS_JSL_EHL(dev_priv)) {
		if (dev_priv->cdclk.hw.ref == 24000)
			dev_priv->max_cdclk_freq = 552000;
		else
			dev_priv->max_cdclk_freq = 556800;
	} else if (DISPLAY_VER(dev_priv) >= 11) {
		if (dev_priv->cdclk.hw.ref == 24000)
			dev_priv->max_cdclk_freq = 648000;
		else
			dev_priv->max_cdclk_freq = 652800;
	} else if (IS_GEMINILAKE(dev_priv)) {
		dev_priv->max_cdclk_freq = 316800;
	} else if (IS_BROXTON(dev_priv)) {
		dev_priv->max_cdclk_freq = 624000;
	} else if (DISPLAY_VER(dev_priv) == 9) {
		u32 limit = intel_de_read(dev_priv, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
		int max_cdclk, vco;

		vco = dev_priv->skl_preferred_vco_freq;
		drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000);

		/*
		 * Use the lower (vco 8640) cdclk values as a
		 * first guess. skl_calc_cdclk() will correct it
		 * if the preferred vco is 8100 instead.
		 */
		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
			max_cdclk = 617143;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
			max_cdclk = 540000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
			max_cdclk = 432000;
		else
			max_cdclk = 308571;

		dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
	} else if (IS_BROADWELL(dev_priv))  {
		/*
		 * FIXME with extra cooling we can allow
		 * 540 MHz for ULX and 675 MHz for ULT.
		 * How can we know if extra cooling is
		 * available? PCI ID, VTB, something else?
		 */
		if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULX(dev_priv))
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULT(dev_priv))
			dev_priv->max_cdclk_freq = 540000;
		else
			dev_priv->max_cdclk_freq = 675000;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->max_cdclk_freq = 320000;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->max_cdclk_freq = 400000;
	} else {
		/* otherwise assume cdclk is fixed */
		dev_priv->max_cdclk_freq = dev_priv->cdclk.hw.cdclk;
	}

	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);

	drm_dbg(&dev_priv->drm, "Max CD clock rate: %d kHz\n",
		dev_priv->max_cdclk_freq);

	drm_dbg(&dev_priv->drm, "Max dotclock rate: %d kHz\n",
		dev_priv->max_dotclk_freq);
}

/**
 * intel_update_cdclk - Determine the current CDCLK frequency
 * @dev_priv: i915 device
 *
 * Determine the current CDCLK frequency.
 */
void intel_update_cdclk(struct drm_i915_private *dev_priv)
{
	intel_cdclk_get_cdclk(dev_priv, &dev_priv->cdclk.hw);

	/*
	 * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
	 * Programmng [sic] note: bit[9:2] should be programmed to the number
	 * of cdclk that generates 4MHz reference clock freq which is used to
	 * generate GMBus clock. This will vary with the cdclk freq.
	 */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_de_write(dev_priv, GMBUSFREQ_VLV,
			       DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000));
}

static int dg1_rawclk(struct drm_i915_private *dev_priv)
{
	/*
	 * DG1 always uses a 38.4 MHz rawclk.  The bspec tells us
	 * "Program Numerator=2, Denominator=4, Divider=37 decimal."
	 */
	intel_de_write(dev_priv, PCH_RAWCLK_FREQ,
		       CNP_RAWCLK_DEN(4) | CNP_RAWCLK_DIV(37) | ICP_RAWCLK_NUM(2));

	return 38400;
}

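/*
 * Returns the rawclk frequency in kHz as divider + fraction, e.g. the 19.2 MHz
 * case yields 19000 + 200 = 19200; the numerator/denominator register fields
 * below encode the same fractional part for the hardware.
 */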
static int cnp_rawclk(struct drm_i915_private *dev_priv)
{
	u32 rawclk;
	int divider, fraction;

	if (intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
		/* 24 MHz */
		divider = 24000;
		fraction = 0;
	} else {
		/* 19.2 MHz */
		divider = 19000;
		fraction = 200;
	}

	rawclk = CNP_RAWCLK_DIV(divider / 1000);
	if (fraction) {
		int numerator = 1;

		rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000,
							   fraction) - 1);
		if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
			rawclk |= ICP_RAWCLK_NUM(numerator);
	}

	intel_de_write(dev_priv, PCH_RAWCLK_FREQ, rawclk);

	return divider + fraction;
}

static int pch_rawclk(struct drm_i915_private *dev_priv)
{
	return (intel_de_read(dev_priv, PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
}

static int vlv_hrawclk(struct drm_i915_private *dev_priv)
{
	/* RAWCLK_FREQ_VLV register updated from power well code */
	return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
				      CCK_DISPLAY_REF_CLOCK_CONTROL);
}

static int i9xx_hrawclk(struct drm_i915_private *dev_priv)
{
	u32 clkcfg;

	/*
	 * hrawclock is 1/4 the FSB frequency
	 *
	 * Note that this only reads the state of the FSB
	 * straps, not the actual FSB frequency. Some BIOSen
	 * let you configure each independently. Ideally we'd
	 * read out the actual FSB frequency but sadly we
	 * don't know which registers have that information,
	 * and all the relevant docs have gone to bit heaven :(
	 */
	clkcfg = intel_de_read(dev_priv, CLKCFG) & CLKCFG_FSB_MASK;

	if (IS_MOBILE(dev_priv)) {
		switch (clkcfg) {
		case CLKCFG_FSB_400:
			return 100000;
		case CLKCFG_FSB_533:
			return 133333;
		case CLKCFG_FSB_667:
			return 166667;
		case CLKCFG_FSB_800:
			return 200000;
		case CLKCFG_FSB_1067:
			return 266667;
		case CLKCFG_FSB_1333:
			return 333333;
		default:
			MISSING_CASE(clkcfg);
			return 133333;
		}
	} else {
		switch (clkcfg) {
		case CLKCFG_FSB_400_ALT:
			return 100000;
		case CLKCFG_FSB_533:
			return 133333;
		case CLKCFG_FSB_667:
			return 166667;
		case CLKCFG_FSB_800:
			return 200000;
		case CLKCFG_FSB_1067_ALT:
			return 266667;
		case CLKCFG_FSB_1333_ALT:
			return 333333;
		case CLKCFG_FSB_1600_ALT:
			return 400000;
		default:
			return 133333;
		}
	}
}

/**
 * intel_read_rawclk - Determine the current RAWCLK frequency
 * @dev_priv: i915 device
 *
 * Determine the current RAWCLK frequency. RAWCLK is a fixed
 * frequency clock so this needs to be done only once.
 */
u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
{
	u32 freq;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
		freq = dg1_rawclk(dev_priv);
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		freq = cnp_rawclk(dev_priv);
	else if (HAS_PCH_SPLIT(dev_priv))
		freq = pch_rawclk(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		freq = vlv_hrawclk(dev_priv);
	else if (DISPLAY_VER(dev_priv) >= 3)
		freq = i9xx_hrawclk(dev_priv);
	else
		/* no rawclk on other platforms, or no need to know it */
		return 0;

	return freq;
}

static struct intel_cdclk_funcs tgl_cdclk_funcs = {
	.get_cdclk = bxt_get_cdclk,
	.set_cdclk = bxt_set_cdclk,
	.bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
	.modeset_calc_cdclk = bxt_modeset_calc_cdclk,
	.calc_voltage_level = tgl_calc_voltage_level,
};

static struct intel_cdclk_funcs ehl_cdclk_funcs = {
	.get_cdclk = bxt_get_cdclk,
	.set_cdclk = bxt_set_cdclk,
	.bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
	.modeset_calc_cdclk = bxt_modeset_calc_cdclk,
	.calc_voltage_level = ehl_calc_voltage_level,
};

static struct intel_cdclk_funcs icl_cdclk_funcs = {
	.get_cdclk = bxt_get_cdclk,
	.set_cdclk = bxt_set_cdclk,
	.bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
	.modeset_calc_cdclk = bxt_modeset_calc_cdclk,
	.calc_voltage_level = icl_calc_voltage_level,
};

static struct intel_cdclk_funcs bxt_cdclk_funcs = {
	.get_cdclk = bxt_get_cdclk,
	.set_cdclk = bxt_set_cdclk,
	.bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
	.modeset_calc_cdclk = bxt_modeset_calc_cdclk,
	.calc_voltage_level = bxt_calc_voltage_level,
};

static struct intel_cdclk_funcs skl_cdclk_funcs = {
	.get_cdclk = skl_get_cdclk,
	.set_cdclk = skl_set_cdclk,
	.bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
	.modeset_calc_cdclk = skl_modeset_calc_cdclk,
};

static struct intel_cdclk_funcs bdw_cdclk_funcs = {
	.get_cdclk = bdw_get_cdclk,
	.set_cdclk = bdw_set_cdclk,
	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
	.modeset_calc_cdclk = bdw_modeset_calc_cdclk,
};

static struct intel_cdclk_funcs chv_cdclk_funcs = {
	.get_cdclk = vlv_get_cdclk,
	.set_cdclk = chv_set_cdclk,
	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
	.modeset_calc_cdclk = vlv_modeset_calc_cdclk,
};

static struct intel_cdclk_funcs vlv_cdclk_funcs = {
	.get_cdclk = vlv_get_cdclk,
	.set_cdclk = vlv_set_cdclk,
	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
	.modeset_calc_cdclk = vlv_modeset_calc_cdclk,
};

static struct intel_cdclk_funcs hsw_cdclk_funcs = {
	.get_cdclk = hsw_get_cdclk,
	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};

/* SNB, IVB, 965G, 945G */
static struct intel_cdclk_funcs fixed_400mhz_cdclk_funcs = {
	.get_cdclk = fixed_400mhz_get_cdclk,
	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};

static struct intel_cdclk_funcs ilk_cdclk_funcs = {
	.get_cdclk = fixed_450mhz_get_cdclk,
	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};

static struct intel_cdclk_funcs gm45_cdclk_funcs = {
	.get_cdclk = gm45_get_cdclk,
	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};

/* G45 uses G33 */

static struct intel_cdclk_funcs i965gm_cdclk_funcs = {
	.get_cdclk = i965gm_get_cdclk,
	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};

/* i965G uses fixed 400 */

static struct intel_cdclk_funcs pnv_cdclk_funcs = {
	.get_cdclk = pnv_get_cdclk,
	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};

static struct intel_cdclk_funcs g33_cdclk_funcs = {
	.get_cdclk = g33_get_cdclk,
	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};

static struct intel_cdclk_funcs i945gm_cdclk_funcs = {
	.get_cdclk = i945gm_get_cdclk,
	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};

/* i945G uses fixed 400 */

static struct intel_cdclk_funcs i915gm_cdclk_funcs = {
	.get_cdclk = i915gm_get_cdclk,
	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};

static struct intel_cdclk_funcs i915g_cdclk_funcs = {
	.get_cdclk = fixed_333mhz_get_cdclk,
	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};

static struct intel_cdclk_funcs i865g_cdclk_funcs = {
	.get_cdclk = fixed_266mhz_get_cdclk,
	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};

static struct intel_cdclk_funcs i85x_cdclk_funcs = {
	.get_cdclk = i85x_get_cdclk,
	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};

static struct intel_cdclk_funcs i845g_cdclk_funcs = {
	.get_cdclk = fixed_200mhz_get_cdclk,
	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};

static struct intel_cdclk_funcs i830_cdclk_funcs = {
	.get_cdclk = fixed_133mhz_get_cdclk,
	.bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
	.modeset_calc_cdclk = fixed_modeset_calc_cdclk,
};

/**
 * intel_init_cdclk_hooks - Initialize CDCLK related modesetting hooks
 * @dev_priv: i915 device
 */
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
	if (IS_DG2(dev_priv)) {
		dev_priv->cdclk_funcs = &tgl_cdclk_funcs;
		dev_priv->cdclk.table = dg2_cdclk_table;
	} else if (IS_ALDERLAKE_P(dev_priv)) {
		dev_priv->cdclk_funcs = &tgl_cdclk_funcs;
		/* Wa_22011320316:adl-p[a0] */
		if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
			dev_priv->cdclk.table = adlp_a_step_cdclk_table;
		else
			dev_priv->cdclk.table = adlp_cdclk_table;
	} else if (IS_ROCKETLAKE(dev_priv)) {
		dev_priv->cdclk_funcs = &tgl_cdclk_funcs;
		dev_priv->cdclk.table = rkl_cdclk_table;
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		dev_priv->cdclk_funcs = &tgl_cdclk_funcs;
		dev_priv->cdclk.table = icl_cdclk_table;
	} else if (IS_JSL_EHL(dev_priv)) {
		dev_priv->cdclk_funcs = &ehl_cdclk_funcs;
		dev_priv->cdclk.table = icl_cdclk_table;
	} else if (DISPLAY_VER(dev_priv) >= 11) {
		dev_priv->cdclk_funcs = &icl_cdclk_funcs;
		dev_priv->cdclk.table = icl_cdclk_table;
	} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
		dev_priv->cdclk_funcs = &bxt_cdclk_funcs;
		if (IS_GEMINILAKE(dev_priv))
			dev_priv->cdclk.table = glk_cdclk_table;
		else
			dev_priv->cdclk.table = bxt_cdclk_table;
	} else if (DISPLAY_VER(dev_priv) == 9) {
		dev_priv->cdclk_funcs = &skl_cdclk_funcs;
	} else if (IS_BROADWELL(dev_priv)) {
		dev_priv->cdclk_funcs = &bdw_cdclk_funcs;
	} else if (IS_HASWELL(dev_priv)) {
		dev_priv->cdclk_funcs = &hsw_cdclk_funcs;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->cdclk_funcs = &chv_cdclk_funcs;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->cdclk_funcs = &vlv_cdclk_funcs;
	} else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
		dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs;
	} else if (IS_IRONLAKE(dev_priv)) {
		dev_priv->cdclk_funcs = &ilk_cdclk_funcs;
	} else if (IS_GM45(dev_priv)) {
		dev_priv->cdclk_funcs = &gm45_cdclk_funcs;
	} else if (IS_G45(dev_priv)) {
		dev_priv->cdclk_funcs = &g33_cdclk_funcs;
	} else if (IS_I965GM(dev_priv)) {
		dev_priv->cdclk_funcs = &i965gm_cdclk_funcs;
	} else if (IS_I965G(dev_priv)) {
		dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->cdclk_funcs = &pnv_cdclk_funcs;
	} else if (IS_G33(dev_priv)) {
		dev_priv->cdclk_funcs = &g33_cdclk_funcs;
	} else if (IS_I945GM(dev_priv)) {
		dev_priv->cdclk_funcs = &i945gm_cdclk_funcs;
	} else if (IS_I945G(dev_priv)) {
		dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs;
	} else if (IS_I915GM(dev_priv)) {
		dev_priv->cdclk_funcs = &i915gm_cdclk_funcs;
	} else if (IS_I915G(dev_priv)) {
		dev_priv->cdclk_funcs = &i915g_cdclk_funcs;
	} else if (IS_I865G(dev_priv)) {
		dev_priv->cdclk_funcs = &i865g_cdclk_funcs;
	} else if (IS_I85X(dev_priv)) {
		dev_priv->cdclk_funcs = &i85x_cdclk_funcs;
	} else if (IS_I845G(dev_priv)) {
		dev_priv->cdclk_funcs = &i845g_cdclk_funcs;
	} else if (IS_I830(dev_priv)) {
		dev_priv->cdclk_funcs = &i830_cdclk_funcs;
	}

	if (drm_WARN(&dev_priv->drm, !dev_priv->cdclk_funcs,
		     "Unknown platform. Assuming i830\n"))
		dev_priv->cdclk_funcs = &i830_cdclk_funcs;
}