intel_dpll_mgr.c 126.7 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
/*
 * Copyright © 2006-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

L
Lucas De Marchi 已提交
24 25
#include <linux/string_helpers.h>

26
#include "i915_reg.h"
27
#include "intel_de.h"
28
#include "intel_display_types.h"
29
#include "intel_dkl_phy.h"
30
#include "intel_dkl_phy_regs.h"
31
#include "intel_dpio_phy.h"
32
#include "intel_dpll.h"
33
#include "intel_dpll_mgr.h"
J
Jani Nikula 已提交
34
#include "intel_hti.h"
35
#include "intel_mg_phy_regs.h"
36
#include "intel_pch_refclk.h"
37
#include "intel_tc.h"
38

39 40 41 42 43 44 45 46 47 48 49
/**
 * DOC: Display PLLs
 *
 * Display PLLs used for driving outputs vary by platform. While some have
 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
 * from a pool. In the latter scenario, it is possible that multiple pipes
 * share a PLL if their configurations match.
 *
 * This file provides an abstraction over display PLLs. The function
 * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
 * users of a PLL are tracked and that tracking is integrated with the atomic
 * modset interface. During an atomic operation, required PLLs can be reserved
 * for a given CRTC and encoder configuration by calling
 * intel_reserve_shared_dplls() and previously reserved PLLs can be released
 * with intel_release_shared_dplls().
 * Changes to the users are first staged in the atomic state, and then made
 * effective by calling intel_shared_dpll_swap_state() during the atomic
 * commit phase.
 */

59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93
/* platform specific hooks for managing DPLLs */
struct intel_shared_dpll_funcs {
	/*
	 * Hook for enabling the pll, called from intel_enable_shared_dpll() if
	 * the pll is not already enabled.
	 */
	void (*enable)(struct drm_i915_private *i915,
		       struct intel_shared_dpll *pll);

	/*
	 * Hook for disabling the pll, called from intel_disable_shared_dpll()
	 * only when it is safe to disable the pll, i.e., there are no more
	 * tracked users for it.
	 */
	void (*disable)(struct drm_i915_private *i915,
			struct intel_shared_dpll *pll);

	/*
	 * Hook for reading the values currently programmed to the DPLL
	 * registers. This is used for initial hw state readout and state
	 * verification after a mode set. Returns true when the PLL is
	 * enabled in hardware (see e.g. the DPLL_VCO_ENABLE check in the
	 * IBX implementation).
	 */
	bool (*get_hw_state)(struct drm_i915_private *i915,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);

	/*
	 * Hook for calculating the pll's output frequency based on its passed
	 * in state.
	 */
	int (*get_freq)(struct drm_i915_private *i915,
			const struct intel_shared_dpll *pll,
			const struct intel_dpll_hw_state *pll_state);
};

94 95 96
/*
 * Platform specific dispatch table for managing the shared DPLL pool;
 * one instance per platform family.
 */
struct intel_dpll_mgr {
	/* table describing this platform's PLLs (terminated by an empty entry) */
	const struct dpll_info *dpll_info;

	/* compute the PLL state for @crtc/@encoder into the atomic state */
	int (*compute_dplls)(struct intel_atomic_state *state,
			     struct intel_crtc *crtc,
			     struct intel_encoder *encoder);
	/* reserve the PLL(s) needed by @crtc/@encoder; returns -errno on failure */
	int (*get_dplls)(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder);
	/* drop the PLL reference(s) held by @crtc in the atomic state */
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	/* switch @crtc to a different, already-enabled PLL (optional hook) */
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	/* refresh the cached reference clock frequencies (optional hook) */
	void (*update_ref_clks)(struct drm_i915_private *i915);
	/* log a decoded PLL hw state for debugging */
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};

113 114 115 116 117 118 119
static void
intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll_state *shared_dpll)
{
	enum intel_dpll_id i;

	/* Copy shared dpll state */
120 121
	for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i];
122 123 124 125 126 127 128 129 130 131

		shared_dpll[i] = pll->state;
	}
}

static struct intel_shared_dpll_state *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
{
	struct intel_atomic_state *state = to_intel_atomic_state(s);

132
	drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
133 134 135 136 137 138 139 140 141 142 143

	if (!state->dpll_set) {
		state->dpll_set = true;

		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
						  state->shared_dpll);
	}

	return state->shared_dpll;
}

144 145 146 147 148 149 150 151
/**
 * intel_get_shared_dpll_by_id - get a DPLL given its id
 * @dev_priv: i915 device instance
 * @id: pll id
 *
 * Returns:
 * A pointer to the DPLL with @id
 */
struct intel_shared_dpll *
intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
			    enum intel_dpll_id id)
{
	/* Direct array lookup; never NULL for a valid @id. */
	return &dev_priv->display.dpll.shared_dplls[id];
}
158 159 160 161 162 163 164 165 166

/* For ILK+ */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	struct intel_dpll_hw_state hw_state;
	bool active;

	if (drm_WARN(&dev_priv->drm, !pll,
		     "asserting DPLL %s with no DPLL\n", str_on_off(state)))
		return;

	/* Compare the real hardware enable state against the expectation. */
	active = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(dev_priv, active != state,
			"%s assertion failure (expected %s, current %s)\n",
			pll->info->name, str_on_off(state),
			str_on_off(active));
}

178 179 180 181 182 183 184 185 186 187
/* Map an ICL+ MG PLL id onto its Type-C port (MGPLL1 <-> TC_PORT_1). */
static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
{
	return TC_PORT_1 + (id - DPLL_ID_ICL_MGPLL1);
}

/* Inverse of icl_pll_id_to_tc_port(): Type-C port -> MG PLL id. */
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
{
	return DPLL_ID_ICL_MGPLL1 + (tc_port - TC_PORT_1);
}

188 189 190 191
static i915_reg_t
intel_combo_pll_enable_reg(struct drm_i915_private *i915,
			   struct intel_shared_dpll *pll)
{
192 193
	if (IS_DG1(i915))
		return DG1_DPLL_ENABLE(pll->info->id);
194 195
	else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
		 (pll->info->id == DPLL_ID_EHL_DPLL4))
196 197
		return MG_PLL_ENABLE(0);

198
	return ICL_DPLL_ENABLE(pll->info->id);
199
}
200

201 202 203 204 205 206 207 208 209 210 211 212 213
/* Return the enable register for a Type-C PLL on this platform. */
static i915_reg_t
intel_tc_pll_enable_reg(struct drm_i915_private *i915,
			struct intel_shared_dpll *pll)
{
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);

	if (IS_ALDERLAKE_P(i915))
		return ADLP_PORTTC_PLL_ENABLE(tc_port);

	return MG_PLL_ENABLE(tc_port);
}

214
/**
215
 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
216
 * @crtc_state: CRTC, and its state, which has a shared DPLL
217
 *
218
 * Enable the shared DPLL used by @crtc.
219
 */
220
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
221
{
222
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
223 224
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
225
	unsigned int pipe_mask = BIT(crtc->pipe);
226
	unsigned int old_mask;
227

228
	if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
229 230
		return;

231
	mutex_lock(&dev_priv->display.dpll.lock);
232 233
	old_mask = pll->active_mask;

234 235
	if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
	    drm_WARN_ON(&dev_priv->drm, pll->active_mask & pipe_mask))
236
		goto out;
237

238
	pll->active_mask |= pipe_mask;
239

240
	drm_dbg_kms(&dev_priv->drm,
241
		    "enable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
242
		    pll->info->name, pll->active_mask, pll->on,
243
		    crtc->base.base.id, crtc->base.name);
244

245
	if (old_mask) {
246
		drm_WARN_ON(&dev_priv->drm, !pll->on);
247
		assert_shared_dpll_enabled(dev_priv, pll);
248
		goto out;
249
	}
250
	drm_WARN_ON(&dev_priv->drm, pll->on);
251

252
	drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
253
	pll->info->funcs->enable(dev_priv, pll);
254
	pll->on = true;
255 256

out:
257
	mutex_unlock(&dev_priv->display.dpll.lock);
258 259
}

260 261
/**
 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
262
 * @crtc_state: CRTC, and its state, which has a shared DPLL
263 264 265
 *
 * Disable the shared DPLL used by @crtc.
 */
266
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
267
{
268
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
269
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
270
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
271
	unsigned int pipe_mask = BIT(crtc->pipe);
272 273

	/* PCH only available on ILK+ */
274
	if (DISPLAY_VER(dev_priv) < 5)
275 276 277 278 279
		return;

	if (pll == NULL)
		return;

280
	mutex_lock(&dev_priv->display.dpll.lock);
281 282 283
	if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
		     "%s not used by [CRTC:%d:%s]\n", pll->info->name,
		     crtc->base.base.id, crtc->base.name))
284
		goto out;
285

286
	drm_dbg_kms(&dev_priv->drm,
287
		    "disable %s (active 0x%x, on? %d) for [CRTC:%d:%s]\n",
288
		    pll->info->name, pll->active_mask, pll->on,
289
		    crtc->base.base.id, crtc->base.name);
290 291

	assert_shared_dpll_enabled(dev_priv, pll);
292
	drm_WARN_ON(&dev_priv->drm, !pll->on);
293

294
	pll->active_mask &= ~pipe_mask;
295
	if (pll->active_mask)
296
		goto out;
297

298
	drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
299
	pll->info->funcs->disable(dev_priv, pll);
300
	pll->on = false;
301 302

out:
303
	mutex_unlock(&dev_priv->display.dpll.lock);
304 305
}

306
static struct intel_shared_dpll *
307 308 309
intel_find_shared_dpll(struct intel_atomic_state *state,
		       const struct intel_crtc *crtc,
		       const struct intel_dpll_hw_state *pll_state,
M
Matt Roper 已提交
310
		       unsigned long dpll_mask)
311
{
312
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
313
	struct intel_shared_dpll *pll, *unused_pll = NULL;
314
	struct intel_shared_dpll_state *shared_dpll;
315
	enum intel_dpll_id i;
316

317
	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
318

319
	drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
M
Matt Roper 已提交
320 321

	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
322
		pll = &dev_priv->display.dpll.shared_dplls[i];
323 324

		/* Only want to check enabled timings first */
325
		if (shared_dpll[i].pipe_mask == 0) {
326 327
			if (!unused_pll)
				unused_pll = pll;
328
			continue;
329
		}
330

331
		if (memcmp(pll_state,
332
			   &shared_dpll[i].hw_state,
333
			   sizeof(*pll_state)) == 0) {
334
			drm_dbg_kms(&dev_priv->drm,
335
				    "[CRTC:%d:%s] sharing existing %s (pipe mask 0x%x, active 0x%x)\n",
336 337
				    crtc->base.base.id, crtc->base.name,
				    pll->info->name,
338
				    shared_dpll[i].pipe_mask,
339
				    pll->active_mask);
340
			return pll;
341 342 343 344
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
345
	if (unused_pll) {
346 347 348
		drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    unused_pll->info->name);
349
		return unused_pll;
350 351
	}

352
	return NULL;
353 354
}

355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377
/**
 * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC
 * @crtc: CRTC on which behalf the reference is taken
 * @pll: DPLL for which the reference is taken
 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
 *
 * Take a reference for @pll tracking the use of it by @crtc.
 */
static void
intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc,
				 const struct intel_shared_dpll *pll,
				 struct intel_shared_dpll_state *shared_dpll_state)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	unsigned int pipe_bit = BIT(crtc->pipe);

	/* The CRTC must not already hold a reference on this PLL. */
	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & pipe_bit) != 0);

	shared_dpll_state->pipe_mask |= pipe_bit;

	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);
}

378
static void
379 380 381 382
intel_reference_shared_dpll(struct intel_atomic_state *state,
			    const struct intel_crtc *crtc,
			    const struct intel_shared_dpll *pll,
			    const struct intel_dpll_hw_state *pll_state)
383
{
384
	struct intel_shared_dpll_state *shared_dpll;
385
	const enum intel_dpll_id id = pll->info->id;
386

387
	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
388

389
	if (shared_dpll[id].pipe_mask == 0)
390
		shared_dpll[id].hw_state = *pll_state;
391

392 393 394 395 396 397 398 399 400 401 402
	intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[id]);
}

/**
 * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC
 * @crtc: CRTC on which behalf the reference is dropped
 * @pll: DPLL for which the reference is dropped
 * @shared_dpll_state: the DPLL atomic state in which the reference is tracked
 *
 * Drop a reference for @pll tracking the end of use of it by @crtc.
 */
403
void
404 405 406 407 408
intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc,
				   const struct intel_shared_dpll *pll,
				   struct intel_shared_dpll_state *shared_dpll_state)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
409

410
	drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0);
411

412 413 414
	shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe);

	drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n",
415
		    crtc->base.base.id, crtc->base.name, pll->info->name);
416 417
}

418 419 420 421 422
static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
					  const struct intel_crtc *crtc,
					  const struct intel_shared_dpll *pll)
{
	struct intel_shared_dpll_state *shared_dpll;
423
	const enum intel_dpll_id id = pll->info->id;
424 425

	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
426

427
	intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[id]);
428 429 430 431 432
}

static void intel_put_dpll(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
433
	const struct intel_crtc_state *old_crtc_state =
434
		intel_atomic_get_old_crtc_state(state, crtc);
435 436 437 438
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	new_crtc_state->shared_dpll = NULL;
439

440
	if (!old_crtc_state->shared_dpll)
441 442
		return;

443
	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
444 445
}

446 447 448 449 450 451 452 453 454 455 456
/**
 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
 * @state: atomic state
 *
 * This is the dpll version of drm_atomic_helper_swap_state() since the
 * helper does not handle driver-specific global state.
 *
 * For consistency with atomic helpers this function does a complete swap,
 * i.e. it also puts the current state into @state, even though there is no
 * need for that at this moment.
 */
457
void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
458
{
459 460
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
461 462
	enum intel_dpll_id i;

463
	if (!state->dpll_set)
464 465
		return;

466
	for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
467
		struct intel_shared_dpll *pll =
468
			&dev_priv->display.dpll.shared_dplls[i];
469

470
		swap(pll->state, shared_dpll[i]);
471 472 473 474 475 476 477
	}
}

static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
478
	const enum intel_dpll_id id = pll->info->id;
479
	intel_wakeref_t wakeref;
480
	u32 val;
481

482
	wakeref = intel_display_power_get_if_enabled(dev_priv,
483
						     POWER_DOMAIN_DISPLAY_CORE);
484
	if (!wakeref)
485 486
		return false;

487
	val = intel_de_read(dev_priv, PCH_DPLL(id));
488
	hw_state->dpll = val;
489 490
	hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
	hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));
491

492
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
493 494 495 496 497 498 499 500 501

	return val & DPLL_VCO_ENABLE;
}

/* Warn if no PCH reference clock source is selected in PCH_DREF_CONTROL. */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
	/* Any of the three source selects being non-zero counts as enabled. */
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(dev_priv, !enabled,
			"PCH refclk assertion failure, should be active but is disabled\n");
}

/* Program and enable a PCH DPLL; the write/delay sequence is order-critical. */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	/* Program the dividers before enabling the VCO. */
	intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
	intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);

	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}

/* Disable a PCH DPLL by clearing its whole control register. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* Clearing the register drops DPLL_VCO_ENABLE; flush and let it settle. */
	intel_de_write(dev_priv, PCH_DPLL(id), 0);
	intel_de_posting_read(dev_priv, PCH_DPLL(id));
	udelay(200);
}

546 547 548 549 550 551 552
/*
 * Nothing to compute here: for PCH DPLLs crtc_state->dpll_hw_state is
 * presumably filled in elsewhere (TODO confirm against the ilk dpll code);
 * this hook only has to exist for the generic dispatch.
 */
static int ibx_compute_dpll(struct intel_atomic_state *state,
			    struct intel_crtc *crtc,
			    struct intel_encoder *encoder)
{
	return 0;
}

553 554 555
static int ibx_get_dpll(struct intel_atomic_state *state,
			struct intel_crtc *crtc,
			struct intel_encoder *encoder)
556
{
557 558
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
559 560 561 562 563 564 565
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;

	if (HAS_PCH_IBX(dev_priv)) {
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
		i = (enum intel_dpll_id) crtc->pipe;
566
		pll = &dev_priv->display.dpll.shared_dplls[i];
567

568 569 570 571
		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] using pre-allocated %s\n",
			    crtc->base.base.id, crtc->base.name,
			    pll->info->name);
572
	} else {
573 574
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
M
Matt Roper 已提交
575 576
					     BIT(DPLL_ID_PCH_PLL_B) |
					     BIT(DPLL_ID_PCH_PLL_A));
577 578
	}

579
	if (!pll)
580
		return -EINVAL;
581

582
	/* reference the pll */
583 584 585 586
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;
587

588
	return 0;
589 590
}

591
/* Dump the raw PCH DPLL register words for debugging. */
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm,
		    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
		    "fp0: 0x%x, fp1: 0x%x\n",
		    hw_state->dpll,
		    hw_state->dpll_md,
		    hw_state->fp0,
		    hw_state->fp1);
}

603 604 605 606
/* Hardware hooks for the IBX/CPT PCH DPLLs (no get_freq implementation). */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};

609 610 611 612 613 614 615 616
/* The two shared PCH DPLLs; table terminated by an empty entry. */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ },
};

/* DPLL manager for PCH platforms. */
static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.compute_dplls = ibx_compute_dpll,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};

623
/* Enable a HSW/BDW WRPLL: write the precomputed control word and wait. */
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));
	udelay(20);
}

/* Enable the HSW/BDW SPLL: write the precomputed control word and wait. */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
	intel_de_posting_read(dev_priv, SPLL_CTL);
	udelay(20);
}

/* Disable a HSW/BDW WRPLL, then possibly reconfigure the PCH refclk. */
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	intel_de_rmw(dev_priv, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->display.dpll.pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}

static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
660
	enum intel_dpll_id id = pll->info->id;
661

662
	intel_de_rmw(dev_priv, SPLL_CTL, SPLL_PLL_ENABLE, 0);
663
	intel_de_posting_read(dev_priv, SPLL_CTL);
664 665 666 667 668

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
669
	if (dev_priv->display.dpll.pch_ssc_use & BIT(id))
670
		intel_init_pch_refclk(dev_priv);
671 672 673 674 675 676
}

static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
677
	const enum intel_dpll_id id = pll->info->id;
678
	intel_wakeref_t wakeref;
679
	u32 val;
680

681
	wakeref = intel_display_power_get_if_enabled(dev_priv,
682
						     POWER_DOMAIN_DISPLAY_CORE);
683
	if (!wakeref)
684 685
		return false;

686
	val = intel_de_read(dev_priv, WRPLL_CTL(id));
687 688
	hw_state->wrpll = val;

689
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
690 691 692 693 694 695 696 697

	return val & WRPLL_PLL_ENABLE;
}

static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
698
	intel_wakeref_t wakeref;
699
	u32 val;
700

701
	wakeref = intel_display_power_get_if_enabled(dev_priv,
702
						     POWER_DOMAIN_DISPLAY_CORE);
703
	if (!wakeref)
704 705
		return false;

706
	val = intel_de_read(dev_priv, SPLL_CTL);
707 708
	hw_state->spll = val;

709
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
710 711 712 713

	return val & SPLL_PLL_ENABLE;
}

714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764
/* LC PLL reference: 2700 MHz; LC_FREQ_2K is the same in units of 2 kHz */
#define LC_FREQ 2700
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)

/* WRPLL post divider search range; only even values (P_INC == 2) */
#define P_MIN 2
#define P_MAX 64
#define P_INC 2

/* Constraints for PLL good behavior */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

/* Candidate (post, feedback, reference) divider triple for the WRPLL */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};

/*
 * Return the PPM error budget allowed when choosing WRPLL dividers for
 * the given pixel clock (Hz). Clocks that divide cleanly from the LC PLL
 * get a zero budget; a handful of known-difficult rates get a wider one;
 * everything else defaults to 1000 ppm.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	unsigned budget;

	switch (clock) {
	case 25175000:
	case 25200000:
	case 27000000:
	case 27027000:
	case 37762500:
	case 37800000:
	case 40500000:
	case 40541000:
	case 54000000:
	case 54054000:
	case 59341000:
	case 59400000:
	case 72000000:
	case 74176000:
	case 74250000:
	case 81000000:
	case 81081000:
	case 89012000:
	case 89100000:
	case 108000000:
	case 108108000:
	case 111264000:
	case 111375000:
	case 148352000:
	case 148500000:
	case 162000000:
	case 162162000:
	case 222525000:
	case 222750000:
	case 296703000:
	case 297000000:
		budget = 0;
		break;
	case 233500000:
	case 245250000:
	case 247750000:
	case 253250000:
	case 298000000:
		budget = 1500;
		break;
	case 169128000:
	case 169500000:
	case 179500000:
	case 202000000:
		budget = 2000;
		break;
	case 256250000:
	case 262500000:
	case 270000000:
	case 272500000:
	case 273750000:
	case 280750000:
	case 281250000:
	case 286000000:
	case 291750000:
		budget = 4000;
		break;
	case 267250000:
	case 268500000:
		budget = 5000;
		break;
	default:
		budget = 1000;
		break;
	}

	return budget;
}

795 796 797
/*
 * Compare the candidate divider triple (r2, n2, p) for target frequency
 * @freq2k (in 2 kHz units) against the current *best and update *best if
 * the candidate is preferable. All math is done in u64 cross-multiplied
 * form to avoid divisions; see the inline comment for the criteria.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}

/*
 * Exhaustively search the (r2, n2, p) divider space for the combination
 * that best produces @clock while honoring the REF/VCO constraints;
 * selection criteria are in hsw_wrpll_update_rnp().
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = {};
	unsigned budget;

	/* Target frequency in units of 2 kHz. */
	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}

920
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
921 922
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
923 924 925
{
	int refclk;
	int n, p, r;
926
	u32 wrpll = pll_state->wrpll;
927 928 929

	switch (wrpll & WRPLL_REF_MASK) {
	case WRPLL_REF_SPECIAL_HSW:
930
		/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
931
		if (IS_HASWELL(dev_priv) && !IS_HASWELL_ULT(dev_priv)) {
932
			refclk = dev_priv->display.dpll.ref_clks.nssc;
933 934
			break;
		}
935
		fallthrough;
936 937 938 939 940 941
	case WRPLL_REF_PCH_SSC:
		/*
		 * We could calculate spread here, but our checking
		 * code only cares about 5% accuracy, and spread is a max of
		 * 0.5% downspread.
		 */
942
		refclk = dev_priv->display.dpll.ref_clks.ssc;
943 944
		break;
	case WRPLL_REF_LCPLL:
945
		refclk = 2700000;
946 947 948 949 950 951 952 953 954 955 956
		break;
	default:
		MISSING_CASE(wrpll);
		return 0;
	}

	r = wrpll & WRPLL_DIVIDER_REF_MASK;
	p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
	n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;

	/* Convert to KHz, p & r have a fixed point portion */
957
	return (refclk * n / 10) / (p * r) * 2;
958 959
}

960 961 962 963
static int
hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
964
	struct drm_i915_private *i915 = to_i915(state->base.dev);
965 966 967 968 969 970 971 972 973 974 975
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned int p, n2, r2;

	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);

	crtc_state->dpll_hw_state.wrpll =
		WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
		WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
		WRPLL_DIVIDER_POST(p);

976 977 978
	crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
							&crtc_state->dpll_hw_state);

979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994
	return 0;
}

/* Pick either WRPLL for @crtc, preferring one already in a matching state. */
static struct intel_shared_dpll *
hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
		       struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	unsigned long pll_mask = BIT(DPLL_ID_WRPLL2) | BIT(DPLL_ID_WRPLL1);

	return intel_find_shared_dpll(state, crtc,
				      &crtc_state->dpll_hw_state,
				      pll_mask);
}

995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012
/*
 * Validate that the requested DP port clock maps onto one of the fixed
 * LCPLL link rates. No state needs to be computed for the LCPLL itself.
 */
static int
hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int clock = crtc_state->port_clock;
	int link_rate = clock / 2;

	/* LCPLL can only source the three standard DP link rates. */
	if (link_rate == 81000 || link_rate == 135000 || link_rate == 270000)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n", clock);
	return -EINVAL;
}

1013
static struct intel_shared_dpll *
1014
hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
M
Manasi Navare 已提交
1015
{
1016
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
M
Manasi Navare 已提交
1017 1018
	struct intel_shared_dpll *pll;
	enum intel_dpll_id pll_id;
1019
	int clock = crtc_state->port_clock;
M
Manasi Navare 已提交
1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031

	switch (clock / 2) {
	case 81000:
		pll_id = DPLL_ID_LCPLL_810;
		break;
	case 135000:
		pll_id = DPLL_ID_LCPLL_1350;
		break;
	case 270000:
		pll_id = DPLL_ID_LCPLL_2700;
		break;
	default:
1032
		MISSING_CASE(clock / 2);
M
Manasi Navare 已提交
1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043
		return NULL;
	}

	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);

	if (!pll)
		return NULL;

	return pll;
}

1044
static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1045 1046
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067
{
	int link_clock = 0;

	switch (pll->info->id) {
	case DPLL_ID_LCPLL_810:
		link_clock = 81000;
		break;
	case DPLL_ID_LCPLL_1350:
		link_clock = 135000;
		break;
	case DPLL_ID_LCPLL_2700:
		link_clock = 270000;
		break;
	default:
		drm_WARN(&i915->drm, 1, "bad port clock sel\n");
		break;
	}

	return link_clock * 2;
}

1068 1069 1070
static int
hsw_ddi_spll_compute_dpll(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
1071 1072 1073 1074
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

1075
	if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
1076
		return -EINVAL;
1077

1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089
	crtc_state->dpll_hw_state.spll =
		SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;

	return 0;
}

static struct intel_shared_dpll *
hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
		      struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
1090 1091 1092 1093 1094

	return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
				      BIT(DPLL_ID_SPLL));
}

1095
static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1096 1097
				 const struct intel_shared_dpll *pll,
				 const struct intel_dpll_hw_state *pll_state)
1098 1099 1100
{
	int link_clock = 0;

1101
	switch (pll_state->spll & SPLL_FREQ_MASK) {
1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118
	case SPLL_FREQ_810MHz:
		link_clock = 81000;
		break;
	case SPLL_FREQ_1350MHz:
		link_clock = 135000;
		break;
	case SPLL_FREQ_2700MHz:
		link_clock = 270000;
		break;
	default:
		drm_WARN(&i915->drm, 1, "bad spll freq\n");
		break;
	}

	return link_clock * 2;
}

1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135
/*
 * Compute PLL state for a HSW crtc, dispatching on output type:
 * HDMI -> WRPLL, DP -> LCPLL, analog/CRT -> SPLL.
 */
static int hsw_compute_dpll(struct intel_atomic_state *state,
			    struct intel_crtc *crtc,
			    struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		return hsw_ddi_wrpll_compute_dpll(state, crtc);

	if (intel_crtc_has_dp_encoder(crtc_state))
		return hsw_ddi_lcpll_compute_dpll(crtc_state);

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		return hsw_ddi_spll_compute_dpll(state, crtc);

	return -EINVAL;
}

1136 1137 1138
static int hsw_get_dpll(struct intel_atomic_state *state,
			struct intel_crtc *crtc,
			struct intel_encoder *encoder)
1139
{
1140 1141
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
1142
	struct intel_shared_dpll *pll = NULL;
1143

1144
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1145
		pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1146
	else if (intel_crtc_has_dp_encoder(crtc_state))
1147
		pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1148 1149
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		pll = hsw_ddi_spll_get_dpll(state, crtc);
1150 1151

	if (!pll)
1152
		return -EINVAL;
1153

1154 1155 1156 1157
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;
1158

1159
	return 0;
1160 1161
}

1162 1163
static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
{
1164
	i915->display.dpll.ref_clks.ssc = 135000;
1165 1166
	/* Non-SSC is only used on non-ULT HSW. */
	if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1167
		i915->display.dpll.ref_clks.nssc = 24000;
1168
	else
1169
		i915->display.dpll.ref_clks.nssc = 135000;
1170 1171
}

1172
/* Log the raw HSW PLL state (WRPLL and SPLL control words) for debugging. */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		    hw_state->wrpll, hw_state->spll);
}

1179 1180 1181 1182
/* Hardware ops for the two HSW WRPLLs. */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
	.get_freq = hsw_ddi_wrpll_get_freq,
};

1186 1187 1188 1189
/* Hardware ops for the HSW SPLL (analog/CRT output). */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
	.get_freq = hsw_ddi_spll_get_freq,
};

1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213
/*
 * The HSW LCPLLs are fixed-frequency and marked INTEL_DPLL_ALWAYS_ON in
 * hsw_plls[], so the enable/disable hooks are intentionally empty and
 * get_hw_state unconditionally reports the PLL as enabled.
 */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
}

static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}

static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}

/* Hardware ops for the always-on HSW LCPLLs. */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
	.get_freq = hsw_ddi_lcpll_get_freq,
};

1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228
/* All shared DPLLs available on HSW/BDW; LCPLLs are fixed and always on. */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },
};

/* Platform hooks tying the HSW PLL table to the shared-DPLL framework. */
static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.compute_dplls = hsw_compute_dpll,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = hsw_update_dpll_ref_clks,
	.dump_hw_state = hsw_dump_hw_state,
};

1236 1237 1238 1239 1240
/* Per-PLL register addresses on SKL: control plus the two HDMI config regs. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};

/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};

1267 1268
/*
 * Program this PLL's 6-bit field in the shared DPLL_CTRL1 register
 * (HDMI mode, SSC and link rate), leaving the other PLLs' fields intact.
 */
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* each PLL owns bits [id*6 .. id*6+5] of DPLL_CTRL1 */
	intel_de_rmw(dev_priv, DPLL_CTRL1,
		     DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id),
		     pll->state.hw_state.ctrl1 << (id * 6));
	/* posting read to flush the write before the PLL is enabled */
	intel_de_posting_read(dev_priv, DPLL_CTRL1);
}

/*
 * Enable a SKL DPLL (1-3): program CTRL1 and the CFGCR config registers,
 * set the enable bit and wait for lock.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
	intel_de_posting_read(dev_priv, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_rmw(dev_priv, regs[id].ctl, 0, LCPLL_PLL_ENABLE);

	/* 5 ms timeout for the PLL to report lock */
	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}

/*
 * DPLL0 is managed by the CDCLK code and is already running; only its
 * CTRL1 link-rate field needs programming here.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}

/* Disable a SKL DPLL (1-3) by clearing its enable bit. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_rmw(dev_priv, regs[id].ctl, LCPLL_PLL_ENABLE, 0);
	intel_de_posting_read(dev_priv, regs[id].ctl);
}

1315 1316 1317 1318 1319
/* DPLL0 drives CDCLK and must never be turned off here; intentional no-op. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}

1320 1321 1322 1323
/*
 * Read back the hardware state of a SKL DPLL (1-3).
 *
 * Returns true and fills @hw_state if the PLL is enabled; returns false
 * if the display power domain is off or the PLL is disabled.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	/* Registers are unreadable with the power domain off; bail early. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* extract this PLL's 6-bit field from the shared CTRL1 register */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}

1357 1358 1359 1360 1361
/*
 * Read back the hardware state of DPLL0. Unlike the other PLLs, DPLL0
 * is expected to always be enabled (it drives CDCLK), so finding it
 * disabled triggers a WARN.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	/* Registers are unreadable with the power domain off; bail early. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	/* extract this PLL's 6-bit field from the shared CTRL1 register */
	val = intel_de_read(dev_priv, DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}

1390
/* Best-candidate tracking for the SKL WRPLL divider search. */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};

/* DCO freq must be within +1%/-6%  of the DCO central freq */
/* deviation limits below are in units of 0.01% (see skl_wrpll_try_divider) */
#define SKL_DCO_MAX_PDEVIATION	100
#define SKL_DCO_MAX_NDEVIATION	600

/*
 * Consider one candidate divider: if the resulting DCO frequency is
 * within the allowed deviation from the central frequency and beats the
 * best candidate so far, record it in @ctx.
 *
 * Cleanup: the candidate-update statements were copy-pasted into both
 * the positive- and negative-deviation branches; fold them into one
 * update guarded by the branch-dependent threshold. Behavior unchanged.
 */
static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
				  u64 central_freq,
				  u64 dco_freq,
				  unsigned int divider)
{
	u64 deviation, max_deviation;

	/* deviation from the central frequency in units of 0.01% */
	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
			      central_freq);

	/* the DCO tolerates more downspread (-6%) than upspread (+1%) */
	max_deviation = dco_freq >= central_freq ? SKL_DCO_MAX_PDEVIATION :
						   SKL_DCO_MAX_NDEVIATION;

	if (deviation < max_deviation && deviation < ctx->min_deviation) {
		ctx->min_deviation = deviation;
		ctx->central_freq = central_freq;
		ctx->dco_freq = dco_freq;
		ctx->p = divider;
	}
}

/*
 * Factor the total post divider p into the hardware's P0/P1/P2
 * multipliers. Outputs are only written for supported values of p;
 * unsupported values leave *p0/*p1/*p2 untouched.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	if (p % 2 == 0) {
		/* even dividers: factor p = 2 * half */
		unsigned int half = p / 2;

		switch (half) {
		case 1:
		case 2:
		case 3:
		case 5:
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
			break;
		default:
			if (half % 2 == 0) {
				*p0 = 2;
				*p1 = half / 2;
				*p2 = 2;
			} else if (half % 3 == 0) {
				*p0 = 3;
				*p1 = half / 3;
				*p2 = 2;
			} else if (half % 7 == 0) {
				*p0 = 7;
				*p1 = half / 7;
				*p2 = 2;
			}
			break;
		}
	} else {
		/* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
		switch (p) {
		case 3:
		case 9:
			*p0 = 3;
			*p1 = 1;
			*p2 = p / 3;
			break;
		case 5:
		case 7:
			*p0 = p;
			*p1 = 1;
			*p2 = 1;
			break;
		case 15:
			*p0 = 3;
			*p1 = 1;
			*p2 = 5;
			break;
		case 21:
			*p0 = 7;
			*p1 = 1;
			*p2 = 3;
			break;
		case 35:
			*p0 = 7;
			*p1 = 1;
			*p2 = 5;
			break;
		}
	}
}

/*
 * Hardware-encoded WRPLL parameters as programmed into the SKL CFGCR
 * registers; field encodings are produced by skl_wrpll_params_populate().
 */
struct skl_wrpll_params {
	u32 dco_fraction;	/* 15-bit fractional part of the DCO ratio */
	u32 dco_integer;	/* integer part of the DCO ratio */
	u32 qdiv_ratio;		/* Q divider ratio (P1) */
	u32 qdiv_mode;		/* 1 = Q divider active (qdiv_ratio != 1) */
	u32 kdiv;		/* encoded K divider (P2) */
	u32 pdiv;		/* encoded P divider (P0) */
	u32 central_freq;	/* encoded DCO central frequency select */
};

/*
 * Translate the chosen DCO central frequency and P0/P1/P2 multipliers
 * into the register encodings expected by DPLL_CFGCR1/CFGCR2.
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u64 afe_clock,
				      int ref_clock,
				      u64 central_freq,
				      u32 p0, u32 p1, u32 p2)
{
	u64 dco_freq;

	/* callers only pass one of the three valid central frequencies */
	switch (central_freq) {
	case 9600000000ULL:
		params->central_freq = 0;
		break;
	case 9000000000ULL:
		params->central_freq = 1;
		break;
	case 8400000000ULL:
		params->central_freq = 3;
		/* last case: no break needed */
	}

	switch (p0) {
	case 1:
		params->pdiv = 0;
		break;
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 7:
		params->pdiv = 4;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	switch (p2) {
	case 5:
		params->kdiv = 0;
		break;
	case 2:
		params->kdiv = 1;
		break;
	case 3:
		params->kdiv = 2;
		break;
	case 1:
		params->kdiv = 3;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	params->qdiv_ratio = p1;
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match bspec.
	 */
	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
	/* fractional part scaled to the 15-bit hardware field (x 0x8000) */
	params->dco_fraction =
		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}

1557
/*
 * Search for WRPLL dividers for the given pixel clock: try every
 * supported post divider against each of the three DCO central
 * frequencies and keep the candidate with the smallest deviation
 * (even dividers are preferred over odd ones).
 *
 * Returns 0 and fills @wrpll_params on success, -EINVAL if no divider
 * satisfies the DCO deviation limits.
 */
static int
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
	static const u64 dco_central_freq[3] = { 8400000000ULL,
						 9000000000ULL,
						 9600000000ULL };
	static const u8 even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
					    24, 28, 30, 32, 36, 40, 42, 44,
					    48, 52, 54, 56, 60, 64, 66, 68,
					    70, 72, 76, 78, 80, 84, 88, 90,
					    92, 96, 98 };
	static const u8 odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const u8 *list;
		int n_dividers;
	} dividers[] = {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx = {
		.min_deviation = U64_MAX,
	};
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p)
		return -EINVAL;

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);

	return 0;
}

1629
/*
 * Decode the CFGCR1/CFGCR2 register state of a SKL WRPLL back into a
 * port clock in kHz (DCO frequency divided by P0*P1*P2 and by the 5x
 * AFE factor). Returns 0 on an invalid divider encoding.
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = i915->display.dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* the Q divider only applies when qdiv mode is enabled */
	if (pll_state->cfgcr2 &  DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;

	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	/* reconstruct the DCO frequency: integer part ... */
	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	/* ... plus the 15-bit fractional part (scaled by 0x8000) */
	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}

1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733
/*
 * Compute the full SKL WRPLL state (ctrl1 + cfgcr1/cfgcr2) for an HDMI
 * output and update port_clock with the exact frequency achieved.
 * Returns 0 on success, or the error from skl_ddi_calculate_wrpll().
 */
static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct skl_wrpll_params wrpll_params = {};
	u32 ctrl1, cfgcr1, cfgcr2;
	int ret;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);

	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	/* port_clock is kHz, the divider calculation wants Hz */
	ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
				      i915->display.dpll.ref_clks.nssc, &wrpll_params);
	if (ret)
		return ret;

	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;

	/* store the clock the chosen dividers actually generate */
	crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
							&crtc_state->dpll_hw_state);

	return 0;
}

1740
/*
 * Build the ctrl1 state for a DP output: encode the requested link rate
 * into the PLL's CTRL1 field. Unlisted rates leave only the override
 * bit set (no link-rate encoding) - rate validation happens elsewhere.
 */
static int
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 ctrl1;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	switch (crtc_state->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		break;
		/* eDP 1.4 rates */
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		break;
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
		break;
	}

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;

	return 0;
}
1776

1777
static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
1778 1779
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
1780 1781 1782
{
	int link_clock = 0;

1783
	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810
		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
	case DPLL_CTRL1_LINK_RATE_810:
		link_clock = 81000;
		break;
	case DPLL_CTRL1_LINK_RATE_1080:
		link_clock = 108000;
		break;
	case DPLL_CTRL1_LINK_RATE_1350:
		link_clock = 135000;
		break;
	case DPLL_CTRL1_LINK_RATE_1620:
		link_clock = 162000;
		break;
	case DPLL_CTRL1_LINK_RATE_2160:
		link_clock = 216000;
		break;
	case DPLL_CTRL1_LINK_RATE_2700:
		link_clock = 270000;
		break;
	default:
		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
		break;
	}

	return link_clock * 2;
}

1811 1812 1813
static int skl_compute_dpll(struct intel_atomic_state *state,
			    struct intel_crtc *crtc,
			    struct intel_encoder *encoder)
J
Jim Bride 已提交
1814
{
1815 1816
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
1817 1818

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1819
		return skl_ddi_hdmi_pll_dividers(crtc_state);
1820
	else if (intel_crtc_has_dp_encoder(crtc_state))
1821
		return skl_ddi_dp_set_dpll_hw_state(crtc_state);
1822
	else
1823 1824 1825 1826 1827 1828 1829 1830 1831 1832
		return -EINVAL;
}

/*
 * Pick and reference a shared DPLL for a SKL crtc: eDP must use DPLL0
 * (shared with CDCLK); other outputs can use DPLL1-3.
 */
static int skl_get_dpll(struct intel_atomic_state *state,
			struct intel_crtc *crtc,
			struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_shared_dpll *pll;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SKL_DPLL0));
	else
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SKL_DPLL3) |
					     BIT(DPLL_ID_SKL_DPLL2) |
					     BIT(DPLL_ID_SKL_DPLL1));
	if (!pll)
		return -EINVAL;

	/* record the crtc as a user of this PLL in the atomic state */
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return 0;
}

1855
static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1856 1857
				const struct intel_shared_dpll *pll,
				const struct intel_dpll_hw_state *pll_state)
1858 1859 1860 1861 1862
{
	/*
	 * ctrl1 register is already shifted for each pll, just use 0 to get
	 * the internal shift for each field
	 */
1863 1864
	if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
		return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1865
	else
1866
		return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
1867 1868
}

1869 1870 1871
/* Cache the SKL PLL reference clock: same as the CDCLK reference. */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}

1875
/* Log the raw SKL PLL state (ctrl1 and the two config words) for debugging. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}

1885 1886 1887 1888
/* Hardware ops for SKL DPLL1-3. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};

/* Hardware ops for DPLL0, which drives CDCLK and is never fully disabled. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};

1899 1900 1901 1902 1903 1904 1905 1906 1907 1908
/* All shared DPLLs available on SKL; DPLL0 is always on (CDCLK source). */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
	{ },
};

/* Platform hooks tying the SKL PLL table to the shared-DPLL framework. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.compute_dplls = skl_compute_dpll,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
};

1916 1917 1918
/*
 * Enable a BXT/GLK port PLL: program the full divider/coefficient state
 * into the PHY registers, trigger recalibration, enable the PLL, wait
 * for lock and finally set up lane staggering. The register write order
 * follows the bspec sequence and must not be rearranged.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL);

	/* GLK requires the PLL power well to be up before programming */
	if (IS_GEMINILAKE(dev_priv)) {
		intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port),
			     0, PORT_PLL_POWER_ENABLE);

		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch),
		     PORT_PLL_10BIT_CLK_ENABLE, 0);

	/* Write P1 & P2 */
	intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch),
		     PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0);

	/* Write M2 integer */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 0),
		     PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0);

	/* Write N */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 1),
		     PORT_PLL_N_MASK, pll->state.hw_state.pll1);

	/* Write M2 fraction */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 2),
		     PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2);

	/* Write M2 fraction enable */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 3),
		     PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3);

	/* Write coeff */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 8),
		     PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8);

	intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 9),
		     PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	/* up to 200 us for the PLL to lock */
	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}

/*
 * Disable a BXT/GLK port PLL.
 *
 * Clears PORT_PLL_ENABLE for the PLL's port (1:1 port->PLL mapping) and,
 * on Gemini Lake only, also drops the PLL power-well request and waits for
 * the power state bit to clear.
 */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */

	intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
	/* posting read forces the write to reach the hardware before we continue */
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		/* GLK: also power down the PLL and wait for confirmation */
		intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port),
			     PORT_PLL_POWER_ENABLE, 0);

		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}

/*
 * Read back the current hardware state of a BXT/GLK port PLL.
 *
 * Returns false if the display power domain cannot be acquired or the PLL
 * is not enabled; otherwise fills @hw_state with the register fields this
 * driver programs (masked to just those fields) and returns true.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll,
					struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Bail out (PLL treated as disabled) if display power is off. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	/* Mask each readout down to the fields the driver actually programs. */
	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_INT_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}

2116
/* pre-calculated values for DP linkrates */
/* .dot is the port clock in kHz; entry [0] is the fallback used when no match is found */
static const struct dpll bxt_dp_clk_val[] = {
	/* m2 is .22 binary fixed point */
	{ .dot = 162000, .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 270000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 540000, .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 /* 27.0 */ },
	{ .dot = 216000, .p1 = 3, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 243000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6133333 /* 24.3 */ },
	{ .dot = 324000, .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
	{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
};

2128
static int
2129
bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
2130
			  struct dpll *clk_div)
2131
{
2132
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2133

2134 2135 2136 2137 2138
	/* Calculate HDMI div */
	/*
	 * FIXME: tie the following calculation into
	 * i9xx_crtc_compute_clock
	 */
2139
	if (!bxt_find_best_dpll(crtc_state, clk_div))
2140
		return -EINVAL;
2141

2142
	drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
2143

2144
	return 0;
2145 2146
}

2147
static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2148
				    struct dpll *clk_div)
2149
{
2150
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2151 2152 2153 2154
	int i;

	*clk_div = bxt_dp_clk_val[0];
	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2155
		if (crtc_state->port_clock == bxt_dp_clk_val[i].dot) {
2156 2157
			*clk_div = bxt_dp_clk_val[i];
			break;
2158 2159 2160
		}
	}

2161
	chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
2162 2163 2164

	drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
		    clk_div->dot != crtc_state->port_clock);
2165 2166
}

2167 2168
static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				     const struct dpll *clk_div)
2169
{
2170
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2171
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
2172
	int clock = crtc_state->port_clock;
2173
	int vco = clk_div->vco;
2174 2175
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;
2176

2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
2194
		drm_err(&i915->drm, "Invalid VCO\n");
2195
		return -EINVAL;
2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208
	}

	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

2209
	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
2210
	dpll_hw_state->pll0 = PORT_PLL_M2_INT(clk_div->m2 >> 22);
2211
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
2212
	dpll_hw_state->pll2 = PORT_PLL_M2_FRAC(clk_div->m2 & 0x3fffff);
2213

2214
	if (clk_div->m2 & 0x3fffff)
2215
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
2216

2217 2218 2219
	dpll_hw_state->pll6 = PORT_PLL_PROP_COEFF(prop_coef) |
		PORT_PLL_INT_COEFF(int_coef) |
		PORT_PLL_GAIN_CTL(gain_ctl);
2220

2221
	dpll_hw_state->pll8 = PORT_PLL_TARGET_CNT(targ_cnt);
2222

2223
	dpll_hw_state->pll9 = PORT_PLL_LOCK_THRESHOLD(5);
2224

2225 2226
	dpll_hw_state->pll10 = PORT_PLL_DCO_AMP(15) |
		PORT_PLL_DCO_AMP_OVR_EN_H;
2227

2228 2229 2230 2231
	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

2232
	return 0;
2233 2234
}

2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251
static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
				const struct intel_shared_dpll *pll,
				const struct intel_dpll_hw_state *pll_state)
{
	struct dpll clock;

	clock.m1 = 2;
	clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
		clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
	clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
	clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
	clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);

	return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
}

2252
static int
2253
bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2254
{
2255
	struct dpll clk_div = {};
2256

2257
	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
2258

2259
	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
2260 2261
}

2262
static int
2263
bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2264
{
2265
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2266
	struct dpll clk_div = {};
2267
	int ret;
2268

2269
	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2270

2271 2272 2273 2274 2275 2276 2277 2278
	ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
	if (ret)
		return ret;

	crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
						      &crtc_state->dpll_hw_state);

	return 0;
2279 2280
}

2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295
/*
 * Compute the DPLL state for the given CRTC, dispatching on the encoder
 * output type (HDMI vs DP). Unsupported output types return -EINVAL.
 */
static int bxt_compute_dpll(struct intel_atomic_state *state,
			    struct intel_crtc *crtc,
			    struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		return bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);

	if (intel_crtc_has_dp_encoder(crtc_state))
		return bxt_ddi_dp_set_dpll_hw_state(crtc_state);

	return -EINVAL;
}

2296 2297 2298
static int bxt_get_dpll(struct intel_atomic_state *state,
			struct intel_crtc *crtc,
			struct intel_encoder *encoder)
2299
{
2300 2301
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
2302 2303
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
2304
	enum intel_dpll_id id;
2305

2306
	/* 1:1 mapping between ports and PLLs */
2307 2308
	id = (enum intel_dpll_id) encoder->port;
	pll = intel_get_shared_dpll_by_id(dev_priv, id);
2309

2310 2311
	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);
2312

2313 2314 2315 2316
	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;
2317

2318
	return 0;
2319 2320
}

2321 2322
/* BXT/GLK use a 100 MHz PLL reference for both SSC and non-SSC. */
static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	i915->display.dpll.ref_clks.nssc = 100000;
	i915->display.dpll.ref_clks.ssc = 100000;
	/* DSI non-SSC ref 19.2MHz */
}

2328
static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2329
			      const struct intel_dpll_hw_state *hw_state)
2330
{
2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
		    "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
		    "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
		    hw_state->ebb0,
		    hw_state->ebb4,
		    hw_state->pll0,
		    hw_state->pll1,
		    hw_state->pll2,
		    hw_state->pll3,
		    hw_state->pll6,
		    hw_state->pll8,
		    hw_state->pll9,
		    hw_state->pll10,
		    hw_state->pcsdw12);
2345 2346
}

2347 2348 2349 2350
/* Shared-DPLL framework hooks for the BXT/GLK port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
	.get_freq = bxt_ddi_pll_get_freq,
};
2353

2354
/* One dedicated PLL per port (A/B/C); the list is NULL-terminated. */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};

2361 2362
/* Platform DPLL manager for BXT/GLK. */
static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.compute_dplls = bxt_compute_dpll,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = bxt_update_dpll_ref_clks,
	.dump_hw_state = bxt_dump_hw_state,
};

2370
/*
 * Factor the overall divider @bestdiv into the P, Q and K multipliers
 * the ICL WRPLL uses (bestdiv == pdiv * qdiv * kdiv for the supported
 * divider values). Outputs are left untouched for an unsupported divider.
 */
static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv % 2 != 0) {
		/* odd dividers: 3/5/7 map directly; 9/15/21 factor out K = 3 */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*kdiv = 1;
		} else {
			*pdiv = bestdiv / 3;
			*kdiv = 3;
		}
		*qdiv = 1;
		return;
	}

	/*
	 * Even dividers: factorizations tried in a fixed preference order
	 * (2, then multiples of 4, 6, 10 and 14).
	 */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}

2409
/*
 * Encode DCO frequency and P/Q/K dividers into the register-level
 * skl_wrpll_params representation.
 */
static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u32 dco_freq, u32 ref_freq,
				      int pdiv, int qdiv, int kdiv)
{
	u32 dco;

	/* K divider: register encoding is 1, 2 or 4 for K = 1, 2, 3. */
	switch (kdiv) {
	case 1:
	case 2:
		params->kdiv = kdiv;
		break;
	case 3:
		params->kdiv = 4;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	/* P divider: one-hot register encoding. */
	switch (pdiv) {
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 5:
		params->pdiv = 4;
		break;
	case 7:
		params->pdiv = 8;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* Q > 1 is only valid together with K == 2. */
	WARN_ON(kdiv != 2 && qdiv != 1);

	params->qdiv_ratio = qdiv;
	if (qdiv == 1)
		params->qdiv_mode = 0;
	else
		params->qdiv_mode = 1;

	/* DCO as a .15 fixed-point multiple of the reference clock. */
	dco = div_u64((u64)dco_freq << 15, ref_freq);

	params->dco_integer = dco >> 15;
	params->dco_fraction = dco & 0x7fff;
}

2457
/*
2458
 * Display WA #22010492432: ehl, tgl, adl-s, adl-p
2459 2460 2461
 * Program half of the nominal DCO divider fraction value.
 */
static bool
2462
ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2463
{
2464 2465
	return (((IS_ELKHARTLAKE(i915) || IS_JASPERLAKE(i915)) &&
		 IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
2466
		 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
2467
		 i915->display.dpll.ref_clks.nssc == 38400;
2468 2469
}

2470 2471 2472 2473 2474
/* Pre-computed combo-PHY PLL configuration for a given DP link rate (kHz). */
struct icl_combo_pll_params {
	int clock;
	struct skl_wrpll_params wrpll;
};

2475 2476 2477 2478
/*
 * These values are already adjusted: they're the bits we write to the
 * registers, not the logical values.
 */
static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};

2506

2507
/* Also used for 38.4 MHz values. */
/* Must list the same link rates, in the same order, as the 24 MHz table. */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};

2535 2536 2537 2538 2539 2540 2541 2542 2543 2544
/* ICL Thunderbolt PLL parameters, selected by reference clock. */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

2545 2546 2547 2548 2549 2550 2551 2552 2553
/* TGL+ Thunderbolt PLL parameters, selected by reference clock. */
static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused (implicitly zero-initialized) */
};

2556 2557
static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
				 struct skl_wrpll_params *pll_params)
2558
{
2559
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2560
	const struct icl_combo_pll_params *params =
2561
		dev_priv->display.dpll.ref_clks.nssc == 24000 ?
2562 2563
		icl_dp_combo_pll_24MHz_values :
		icl_dp_combo_pll_19_2MHz_values;
2564
	int clock = crtc_state->port_clock;
2565
	int i;
2566

2567 2568 2569
	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
		if (clock == params[i].clock) {
			*pll_params = params[i].wrpll;
2570
			return 0;
2571
		}
2572 2573
	}

2574
	MISSING_CASE(clock);
2575
	return -EINVAL;
2576 2577
}

2578 2579
static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
			    struct skl_wrpll_params *pll_params)
2580
{
2581
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2582

2583
	if (DISPLAY_VER(dev_priv) >= 12) {
2584
		switch (dev_priv->display.dpll.ref_clks.nssc) {
2585
		default:
2586
			MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2587
			fallthrough;
2588
		case 19200:
2589
		case 38400:
2590 2591 2592 2593 2594 2595 2596
			*pll_params = tgl_tbt_pll_19_2MHz_values;
			break;
		case 24000:
			*pll_params = tgl_tbt_pll_24MHz_values;
			break;
		}
	} else {
2597
		switch (dev_priv->display.dpll.ref_clks.nssc) {
2598
		default:
2599
			MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
2600
			fallthrough;
2601 2602 2603 2604 2605 2606 2607 2608 2609 2610
		case 19200:
		case 38400:
			*pll_params = icl_tbt_pll_19_2MHz_values;
			break;
		case 24000:
			*pll_params = icl_tbt_pll_24MHz_values;
			break;
		}
	}

2611
	return 0;
2612 2613
}

2614
/*
 * get_freq hook for the Thunderbolt PLL; always returns 0 and warns,
 * since there is no single output frequency to report (see below).
 */
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
				    const struct intel_shared_dpll *pll,
				    const struct intel_dpll_hw_state *pll_state)
{
	/*
	 * The PLL outputs multiple frequencies at the same time, selection is
	 * made at DDI clock mux level.
	 */
	drm_WARN_ON(&i915->drm, 1);

	return 0;
}

2627 2628
static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
{
2629
	int ref_clock = i915->display.dpll.ref_clks.nssc;
2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640

	/*
	 * For ICL+, the spec states: if reference frequency is 38.4,
	 * use 19.2 because the DPLL automatically divides that by 2.
	 */
	if (ref_clock == 38400)
		ref_clock = 19200;

	return ref_clock;
}

2641
static int
2642 2643 2644 2645
icl_calc_wrpll(struct intel_crtc_state *crtc_state,
	       struct skl_wrpll_params *wrpll_params)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659
	int ref_clock = icl_wrpll_ref_clock(i915);
	u32 afe_clock = crtc_state->port_clock * 5;
	u32 dco_min = 7998000;
	u32 dco_max = 10000000;
	u32 dco_mid = (dco_min + dco_max) / 2;
	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
					 18, 20, 24, 28, 30, 32,  36,  40,
					 42, 44, 48, 50, 52, 54,  56,  60,
					 64, 66, 68, 70, 72, 76,  78,  80,
					 84, 88, 90, 92, 96, 98, 100, 102,
					  3,  5,  7,  9, 15, 21 };
	u32 dco, best_dco = 0, dco_centrality = 0;
	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2660

2661 2662 2663 2664 2665
	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		dco = afe_clock * dividers[d];

		if (dco <= dco_max && dco >= dco_min) {
			dco_centrality = abs(dco - dco_mid);
2666

2667 2668 2669 2670 2671 2672 2673 2674 2675
			if (dco_centrality < best_dco_centrality) {
				best_dco_centrality = dco_centrality;
				best_div = dividers[d];
				best_dco = dco;
			}
		}
	}

	if (best_div == 0)
2676
		return -EINVAL;
2677 2678 2679 2680 2681

	icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
	icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
				  pdiv, qdiv, kdiv);

2682
	return 0;
2683 2684 2685
}

/*
 * Decode a combo-PHY PLL register state back into its output port clock
 * (in kHz): DCO frequency divided by the P1/P2/P0 dividers and the fixed
 * AFE factor of 5. Returns 0 (with a WARN) on a malformed divider field.
 */
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
				      const struct intel_shared_dpll *pll,
				      const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = icl_wrpll_ref_clock(i915);
	u32 dco_fraction;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
	p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;

	/* Q divider only applies when qdiv_mode is set; otherwise it is 1. */
	if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
		p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
			DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	else
		p1 = 1;

	/* Map the one-hot register encodings back to logical divider values. */
	switch (p0) {
	case DPLL_CFGCR1_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR1_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR1_PDIV_5:
		p0 = 5;
		break;
	case DPLL_CFGCR1_PDIV_7:
		p0 = 7;
		break;
	}

	switch (p2) {
	case DPLL_CFGCR1_KDIV_1:
		p2 = 1;
		break;
	case DPLL_CFGCR1_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR1_KDIV_3:
		p2 = 3;
		break;
	}

	dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
		   ref_clock;

	dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		       DPLL_CFGCR0_DCO_FRACTION_SHIFT;

	/* Undo the WA #22010492432 halving applied at programming time. */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction *= 2;

	/* fraction is .15 fixed point of the reference clock */
	dco_freq += (dco_fraction * ref_clock) / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	return dco_freq / (p0 * p1 * p2 * 5);
}

2746 2747
/*
 * Translate skl_wrpll_params into the CFGCR0/CFGCR1 (and, with the AFC
 * override, DIV0) register values for a combo-PHY PLL.
 */
static void icl_calc_dpll_state(struct drm_i915_private *i915,
				const struct skl_wrpll_params *pll_params,
				struct intel_dpll_hw_state *pll_state)
{
	u32 dco_fraction = pll_params->dco_fraction;

	/* WA #22010492432: program half the nominal DCO fraction. */
	if (ehl_combo_pll_div_frac_wa_needed(i915))
		dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);

	pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
			    pll_params->dco_integer;

	pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
			    DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
			    DPLL_CFGCR1_KDIV(pll_params->kdiv) |
			    DPLL_CFGCR1_PDIV(pll_params->pdiv);

	/* Clock-select field differs between TGL+ and ICL. */
	if (DISPLAY_VER(i915) >= 12)
		pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
	else
		pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;

	/* Optional VBT-provided AFC startup override. */
	if (i915->display.vbt.override_afc_startup)
		pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
}

2772 2773 2774 2775
/*
 * Find a div1/div2 pair putting the MG/DKL PLL DCO in its valid range for
 * @clock_khz, and fill in the refclkin/clktop2 register fields of @state.
 * div1 candidates are tried largest-first, div2 from 10 down to 1; the
 * first in-range combination wins. Returns -EINVAL if none exists.
 */
static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
				    u32 *target_dco_khz,
				    struct intel_dpll_hw_state *state,
				    bool is_dkl)
{
	static const u8 div1_vals[] = { 7, 5, 3, 2 };
	u32 dco_min_freq, dco_max_freq;
	unsigned int i;
	int div2;

	/* DP is locked to an 8.1 GHz DCO; HDMI gets a range. */
	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
	dco_max_freq = is_dp ? 8100000 : 10000000;

	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
		int div1 = div1_vals[i];

		for (div2 = 10; div2 > 0; div2--) {
			int dco = div1 * div2 * clock_khz * 5;
			int a_divratio, tlinedrv, inputsel;
			u32 hsdiv;

			if (dco < dco_min_freq || dco > dco_max_freq)
				continue;

			if (div2 >= 2) {
				/*
				 * Note: a_divratio not matching TGL BSpec
				 * algorithm but matching hardcoded values and
				 * working on HW for DP alt-mode at least
				 */
				a_divratio = is_dp ? 10 : 5;
				tlinedrv = is_dkl ? 1 : 2;
			} else {
				a_divratio = 5;
				tlinedrv = 0;
			}
			inputsel = is_dp ? 0 : 1;

			/* Encode div1 into the HSDIV ratio register field. */
			switch (div1) {
			default:
				MISSING_CASE(div1);
				fallthrough;
			case 2:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
				break;
			case 3:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
				break;
			case 5:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
				break;
			case 7:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
				break;
			}

			*target_dco_khz = dco;

			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);

			state->mg_clktop2_coreclkctl1 =
				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);

			state->mg_clktop2_hsclkctl =
				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
				hsdiv |
				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);

			return 0;
		}
	}

	return -EINVAL;
}

/*
 * The specification for this function uses real numbers, so the math had to be
 * adapted to integer-only calculation, that's why it looks so different.
 */
/*
 * Compute the full MG (ICL) or Dekel (TGL+) PHY PLL register state for
 * crtc_state->port_clock. Returns -EINVAL when no valid divider/feedback
 * configuration exists for the clock or the reference frequency is
 * unsupported.
 */
static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
				 struct intel_dpll_hw_state *pll_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int refclk_khz = dev_priv->display.dpll.ref_clks.nssc;
	int clock = crtc_state->port_clock;
	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
	u32 iref_ndiv, iref_trim, iref_pulse_w;
	u32 prop_coeff, int_coeff;
	u32 tdc_targetcnt, feedfwgain;
	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
	u64 tmp;
	bool use_ssc = false;
	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
	bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
	int ret;

	/* Also fills in the refclkin/clktop2 fields of pll_state. */
	ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
				       pll_state, is_dkl);
	if (ret)
		return ret;

	/* Feedback divider: integer part must fit in 8 bits. */
	m1div = 2;
	m2div_int = dco_khz / (refclk_khz * m1div);
	if (m2div_int > 255) {
		/* DKL only supports m1div == 2; MG can retry with 4. */
		if (!is_dkl) {
			m1div = 4;
			m2div_int = dco_khz / (refclk_khz * m1div);
		}

		if (m2div_int > 255)
			return -EINVAL;
	}
	m2div_rem = dco_khz % (refclk_khz * m1div);

	/* Fractional feedback part as a .22 fixed-point value. */
	tmp = (u64)m2div_rem * (1 << 22);
	do_div(tmp, refclk_khz * m1div);
	m2div_frac = tmp;

	switch (refclk_khz) {
	case 19200:
		iref_ndiv = 1;
		iref_trim = 28;
		iref_pulse_w = 1;
		break;
	case 24000:
		iref_ndiv = 1;
		iref_trim = 25;
		iref_pulse_w = 2;
		break;
	case 38400:
		iref_ndiv = 2;
		iref_trim = 28;
		iref_pulse_w = 1;
		break;
	default:
		MISSING_CASE(refclk_khz);
		return -EINVAL;
	}

	/*
	 * tdc_res = 0.000003
	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
	 *
	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
	 * was supposed to be a division, but we rearranged the operations of
	 * the formula to avoid early divisions so we don't multiply the
	 * rounding errors.
	 *
	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
	 * we also rearrange to work with integers.
	 *
	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
	 * last division by 10.
	 */
	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;

	/*
	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
	 * 32 bits. That's not a problem since we round the division down
	 * anyway.
	 */
	feedfwgain = (use_ssc || m2div_rem > 0) ?
		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;

	if (dco_khz >= 9000000) {
		prop_coeff = 5;
		int_coeff = 10;
	} else {
		prop_coeff = 4;
		int_coeff = 8;
	}

	if (use_ssc) {
		tmp = mul_u32_u32(dco_khz, 47 * 32);
		do_div(tmp, refclk_khz * m1div * 10000);
		ssc_stepsize = tmp;

		tmp = mul_u32_u32(dco_khz, 1000);
		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
	} else {
		ssc_stepsize = 0;
		ssc_steplen = 0;
	}
	ssc_steplog = 4;

	/* write pll_state calculations */
	if (is_dkl) {
		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
					 DKL_PLL_DIV0_FBPREDIV(m1div) |
					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
		if (dev_priv->display.vbt.override_afc_startup) {
			u8 val = dev_priv->display.vbt.override_afc_startup_val;

			pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
		}

		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);

		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
					(use_ssc ? DKL_PLL_SSC_EN : 0);

		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);

		pll_state->mg_pll_tdc_coldst_bias =
				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);

	} else {
		pll_state->mg_pll_div0 =
			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
			MG_PLL_DIV0_FBDIV_INT(m2div_int);

		pll_state->mg_pll_div1 =
			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
			MG_PLL_DIV1_DITHER_DIV_2 |
			MG_PLL_DIV1_NDIVRATIO(1) |
			MG_PLL_DIV1_FBPREDIV(m1div);

		pll_state->mg_pll_lf =
			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
			MG_PLL_LF_AFCCNTSEL_512 |
			MG_PLL_LF_GAINCTRL(1) |
			MG_PLL_LF_INT_COEFF(int_coeff) |
			MG_PLL_LF_PROP_COEFF(prop_coeff);

		pll_state->mg_pll_frac_lock =
			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
			MG_PLL_FRAC_LOCK_DCODITHEREN |
			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
		if (use_ssc || m2div_rem > 0)
			pll_state->mg_pll_frac_lock |=
				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;

		pll_state->mg_pll_ssc =
			(use_ssc ? MG_PLL_SSC_EN : 0) |
			MG_PLL_SSC_TYPE(2) |
			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
			MG_PLL_SSC_STEPNUM(ssc_steplog) |
			MG_PLL_SSC_FLLEN |
			MG_PLL_SSC_STEPSIZE(ssc_stepsize);

		pll_state->mg_pll_tdc_coldst_bias =
			MG_PLL_TDC_COLDST_COLDSTART |
			MG_PLL_TDC_COLDST_IREFINT_EN |
			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
			MG_PLL_TDC_TDCOVCCORR_EN |
			MG_PLL_TDC_TDCSEL(3);

		pll_state->mg_pll_bias =
			MG_PLL_BIAS_BIAS_GB_SEL(3) |
			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
			MG_PLL_BIAS_BIAS_BONUS(10) |
			MG_PLL_BIAS_BIASCAL_EN |
			MG_PLL_BIAS_CTRIM(12) |
			MG_PLL_BIAS_VREF_RDAC(4) |
			MG_PLL_BIAS_IREFTRIM(iref_trim);

		/* With a 38.4 MHz reference most bias bits are left at reset. */
		if (refclk_khz == 38400) {
			pll_state->mg_pll_tdc_coldst_bias_mask =
				MG_PLL_TDC_COLDST_COLDSTART;
			pll_state->mg_pll_bias_mask = 0;
		} else {
			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
			pll_state->mg_pll_bias_mask = -1U;
		}

		pll_state->mg_pll_tdc_coldst_bias &=
			pll_state->mg_pll_tdc_coldst_bias_mask;
		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
	}

	return 0;
}

3055
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
3056 3057
				   const struct intel_shared_dpll *pll,
				   const struct intel_dpll_hw_state *pll_state)
3058 3059 3060 3061
{
	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
	u64 tmp;

3062
	ref_clock = dev_priv->display.dpll.ref_clks.nssc;
3063

3064
	if (DISPLAY_VER(dev_priv) >= 12) {
3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126
		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
			m2_frac = pll_state->mg_pll_bias &
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
		} else {
			m2_frac = 0;
		}
	} else {
		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
			m2_frac = pll_state->mg_pll_div0 &
				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
		} else {
			m2_frac = 0;
		}
	}

	switch (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
		div1 = 2;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
		div1 = 3;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
		div1 = 5;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
		div1 = 7;
		break;
	default:
		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
		return 0;
	}

	div2 = (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;

	/* div2 value of 0 is same as 1 means no div */
	if (div2 == 0)
		div2 = 1;

	/*
	 * Adjust the original formula to delay the division by 2^22 in order to
	 * minimize possible rounding errors.
	 */
	tmp = (u64)m1 * m2_int * ref_clock +
	      (((u64)m1 * m2_frac * ref_clock) >> 22);
	tmp = div_u64(tmp, 5 * div1 * div2);

	return tmp;
}

3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151
/**
 * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
 * @crtc_state: state for the CRTC to select the DPLL for
 * @port_dpll_id: the active @port_dpll_id to select
 *
 * Select the given @port_dpll_id instance from the DPLLs reserved for the
 * CRTC.
 */
void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
			      enum icl_port_dpll_id port_dpll_id)
{
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[port_dpll_id];

	crtc_state->shared_dpll = port_dpll->pll;
	crtc_state->dpll_hw_state = port_dpll->hw_state;
}

static void icl_update_active_dpll(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_digital_port *primary_port;
3152
	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3153 3154

	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3155 3156
		enc_to_mst(encoder)->primary :
		enc_to_dig_port(encoder);
3157

3158
	if (primary_port &&
3159 3160
	    (intel_tc_port_in_dp_alt_mode(primary_port) ||
	     intel_tc_port_in_legacy_mode(primary_port)))
3161 3162 3163 3164 3165
		port_dpll_id = ICL_PORT_DPLL_MG_PHY;

	icl_set_active_port_dpll(crtc_state, port_dpll_id);
}

3166 3167
static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
				      struct intel_crtc *crtc)
3168
{
3169
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3170 3171
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
3172 3173
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3174
	struct skl_wrpll_params pll_params = {};
3175
	int ret;
3176

3177 3178 3179 3180 3181 3182
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		ret = icl_calc_wrpll(crtc_state, &pll_params);
	else
		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);

3183
	if (ret)
3184
		return ret;
3185

3186 3187
	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

3188 3189 3190
	/* this is mainly for the fastset check */
	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);

3191 3192 3193
	crtc_state->port_clock = icl_ddi_combo_pll_get_freq(dev_priv, NULL,
							    &port_dpll->hw_state);

3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208
	return 0;
}

static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
				  struct intel_crtc *crtc,
				  struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	enum port port = encoder->port;
	unsigned long dpll_mask;

3209 3210 3211 3212 3213 3214 3215
	if (IS_ALDERLAKE_S(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_DG1_DPLL3) |
			BIT(DPLL_ID_DG1_DPLL2) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_DG1(dev_priv)) {
3216 3217 3218 3219 3220 3221 3222 3223 3224 3225
		if (port == PORT_D || port == PORT_E) {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL2) |
				BIT(DPLL_ID_DG1_DPLL3);
		} else {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL0) |
				BIT(DPLL_ID_DG1_DPLL1);
		}
	} else if (IS_ROCKETLAKE(dev_priv)) {
M
Matt Roper 已提交
3226 3227 3228 3229
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
3230 3231
	} else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
				port != PORT_A) {
M
Matt Roper 已提交
3232 3233 3234 3235 3236
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else {
M
Matt Roper 已提交
3237
		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
M
Matt Roper 已提交
3238
	}
3239

M
Matt Roper 已提交
3240
	/* Eliminate DPLLs from consideration if reserved by HTI */
J
Jani Nikula 已提交
3241
	dpll_mask &= ~intel_hti_dpll_mask(dev_priv);
M
Matt Roper 已提交
3242

3243 3244
	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
M
Matt Roper 已提交
3245
						dpll_mask);
3246
	if (!port_dpll->pll)
3247
		return -EINVAL;
3248 3249

	intel_reference_shared_dpll(state, crtc,
3250
				    port_dpll->pll, &port_dpll->hw_state);
3251

3252
	icl_update_active_dpll(state, crtc, encoder);
3253

3254
	return 0;
3255 3256
}

3257 3258
static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
3259
{
3260 3261 3262
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
3263 3264 3265
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	struct skl_wrpll_params pll_params = {};
3266
	int ret;
3267

3268
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3269
	ret = icl_calc_tbt_pll(crtc_state, &pll_params);
3270
	if (ret)
3271
		return ret;
3272

3273 3274
	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

3275 3276
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
3277
	if (ret)
3278 3279
		return ret;

3280 3281 3282
	/* this is mainly for the fastset check */
	icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);

3283 3284 3285
	crtc_state->port_clock = icl_ddi_mg_pll_get_freq(dev_priv, NULL,
							 &port_dpll->hw_state);

3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301
	return 0;
}

static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
				struct intel_crtc *crtc,
				struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	enum intel_dpll_id dpll_id;
	int ret;

	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3302 3303
	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
M
Matt Roper 已提交
3304
						BIT(DPLL_ID_ICL_TBTPLL));
3305
	if (!port_dpll->pll)
3306
		return -EINVAL;
3307 3308
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);
3309 3310


3311 3312 3313 3314 3315
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
							 encoder->port));
	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
M
Matt Roper 已提交
3316
						BIT(dpll_id));
3317
	if (!port_dpll->pll) {
3318
		ret = -EINVAL;
3319 3320
		goto err_unreference_tbt_pll;
	}
3321
	intel_reference_shared_dpll(state, crtc,
3322
				    port_dpll->pll, &port_dpll->hw_state);
3323

3324
	icl_update_active_dpll(state, crtc, encoder);
3325

3326
	return 0;
3327 3328 3329 3330 3331

err_unreference_tbt_pll:
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);

3332
	return ret;
3333 3334
}

3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351
/* Dispatch DPLL state computation to the combo PHY or Type-C PHY path. */
static int icl_compute_dplls(struct intel_atomic_state *state,
			     struct intel_crtc *crtc,
			     struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

	if (intel_phy_is_combo(dev_priv, phy))
		return icl_compute_combo_phy_dpll(state, crtc);

	if (intel_phy_is_tc(dev_priv, phy))
		return icl_compute_tc_phy_dplls(state, crtc);

	MISSING_CASE(phy);

	return 0;
}

3352 3353 3354
static int icl_get_dplls(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
3355 3356
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3357
	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3358

3359
	if (intel_phy_is_combo(dev_priv, phy))
3360
		return icl_get_combo_phy_dpll(state, crtc, encoder);
3361
	else if (intel_phy_is_tc(dev_priv, phy))
3362 3363
		return icl_get_tc_phy_dplls(state, crtc, encoder);

3364
	MISSING_CASE(phy);
3365

3366
	return -EINVAL;
3367 3368
}

3369 3370 3371
static void icl_put_dplls(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
3372
	const struct intel_crtc_state *old_crtc_state =
3373
		intel_atomic_get_old_crtc_state(state, crtc);
3374 3375
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
3376 3377
	enum icl_port_dpll_id id;

3378 3379
	new_crtc_state->shared_dpll = NULL;

3380
	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3381
		const struct icl_port_dpll *old_port_dpll =
3382
			&old_crtc_state->icl_port_dplls[id];
3383 3384
		struct icl_port_dpll *new_port_dpll =
			&new_crtc_state->icl_port_dplls[id];
3385

3386
		new_port_dpll->pll = NULL;
3387

3388 3389
		if (!old_port_dpll->pll)
			continue;
3390

3391
		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3392 3393 3394
	}
}

3395 3396 3397 3398 3399 3400 3401 3402 3403 3404
/*
 * Read out the MG PHY PLL hw state for a Type-C port.
 *
 * Returns true and fills @hw_state iff the PLL is currently enabled. Each
 * register value is masked down to the fields this driver programs, since
 * the registers also contain reserved fields written via RMW (see
 * icl_mg_pll_write()).
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	/* Registers are only accessible with display core power up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/*
	 * With a 38.4 MHz reference only a subset of the bias/TDC fields is
	 * relevant for state comparison; otherwise compare them in full.
	 */
	if (dev_priv->display.dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}

3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475
/*
 * Read out the Dekel PHY PLL hw state for a Type-C port (TGL+).
 *
 * Returns true and fills @hw_state iff the PLL is currently enabled. The
 * Dekel registers are reused as the mg_* fields of the hw state; every
 * value is masked down to the fields programmed by dkl_pll_write().
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* Registers are only accessible with display core power up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, intel_tc_pll_enable_reg(dev_priv, pll));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(dev_priv,
						       DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_pll_div0 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV0(tc_port));
	/* Only compare the AFC startup field when the VBT overrides it. */
	val = DKL_PLL_DIV0_MASK;
	if (dev_priv->display.vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	hw_state->mg_pll_div0 &= val;

	hw_state->mg_pll_div1 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}

3532 3533
/*
 * Common readout for combo/TBT PLLs: check @enable_reg for PLL_ENABLE and,
 * if set, read the platform-specific CFGCR0/CFGCR1 (and on TGL+ optionally
 * DIV0) registers into @hw_state. Returns true iff the PLL is enabled.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* Registers are only accessible with display core power up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	/* CFGCR register layout differs per platform; mirror icl_dpll_write(). */
	if (IS_ALDERLAKE_S(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, ADLS_DPLL_CFGCR1(id));
	} else if (IS_DG1(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
	} else if (IS_ROCKETLAKE(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR1(id));
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR1(id));
		/* div0 is only compared when the VBT overrides AFC startup. */
		if (dev_priv->display.vbt.override_afc_startup) {
			hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id));
			hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
		}
	} else {
		/* JSL/EHL DPLL4 uses the register instance at index 4. */
		if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
		    id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}

3592 3593 3594 3595
/* Readout for a combo PHY PLL via the common helper, using its enable register. */
static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
				   struct intel_shared_dpll *pll,
				   struct intel_dpll_hw_state *hw_state)
{
	return icl_pll_get_hw_state(dev_priv, pll, hw_state,
				    intel_combo_pll_enable_reg(dev_priv, pll));
}

/* Readout for the TBT PLL, which always uses the fixed TBT_PLL_ENABLE register. */
static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
}

3608 3609 3610 3611 3612
/*
 * Program the CFGCR0/CFGCR1 (and, where applicable, the AFC startup field
 * of DIV0) registers of a combo/TBT PLL from the cached sw state. The
 * register instances are selected per platform, mirroring
 * icl_pll_get_hw_state().
 */
static void icl_dpll_write(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg, div0_reg = INVALID_MMIO_REG;

	if (IS_ALDERLAKE_S(dev_priv)) {
		cfgcr0_reg = ADLS_DPLL_CFGCR0(id);
		cfgcr1_reg = ADLS_DPLL_CFGCR1(id);
	} else if (IS_DG1(dev_priv)) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (IS_ROCKETLAKE(dev_priv)) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
		/* Only TGL+ has a DIV0 register with the AFC startup field. */
		div0_reg = TGL_DPLL0_DIV0(id);
	} else {
		/* JSL/EHL DPLL4 uses the register instance at index 4. */
		if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
		    id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
	/* An AFC override without a DIV0 register would be silently dropped. */
	drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->display.vbt.override_afc_startup &&
			 !i915_mmio_reg_valid(div0_reg));
	if (dev_priv->display.vbt.override_afc_startup &&
	    i915_mmio_reg_valid(div0_reg))
		intel_de_rmw(dev_priv, div0_reg,
			     TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0);
	/* Posting read to flush the writes before the PLL is enabled. */
	intel_de_posting_read(dev_priv, cfgcr1_reg);
}

/* Program the MG PHY PLL registers for a Type-C port from the cached sw state. */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	intel_de_rmw(dev_priv, MG_REFCLKIN_CTL(tc_port),
		     MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl);

	intel_de_rmw(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port),
		     MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK,
		     hw_state->mg_clktop2_coreclkctl1);

	intel_de_rmw(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port),
		     MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		     MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		     MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		     MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK,
		     hw_state->mg_clktop2_hsclkctl);

	/* These registers are fully owned by the driver, so plain writes. */
	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	/* Masks generated at calc/readout time, refclk dependent. */
	intel_de_rmw(dev_priv, MG_PLL_BIAS(tc_port),
		     hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias);

	intel_de_rmw(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port),
		     hw_state->mg_pll_tdc_coldst_bias_mask,
		     hw_state->mg_pll_tdc_coldst_bias);

	/* Posting read to flush the writes before the PLL is enabled. */
	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}

3693 3694 3695
/* Program the Dekel PHY PLL registers for a Type-C port (TGL+) from the sw state. */
static void dkl_pll_write(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	/* All the registers are RMW */
	val = intel_dkl_phy_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_dkl_phy_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	/* The AFC startup field is only touched when the VBT overrides it. */
	val = DKL_PLL_DIV0_MASK;
	if (dev_priv->display.vbt.override_afc_startup)
		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
	intel_dkl_phy_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val,
			  hw_state->mg_pll_div0);

	val = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_dkl_phy_write(dev_priv, DKL_PLL_DIV1(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_dkl_phy_write(dev_priv, DKL_PLL_SSC(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_dkl_phy_write(dev_priv, DKL_PLL_BIAS(tc_port), val);

	val = intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_dkl_phy_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Posting read to flush the writes before the PLL is enabled. */
	intel_dkl_phy_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}

3758 3759 3760
/* Power up a PLL via @enable_reg and wait for its power state to report enabled. */
static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 i915_reg_t enable_reg)
{
	intel_de_rmw(dev_priv, enable_reg, 0, PLL_POWER_ENABLE);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
			pll->info->id);
}
3772

3773 3774 3775 3776
/* Set PLL_ENABLE in @enable_reg and wait for the PLL to report lock. */
static void icl_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll,
			   i915_reg_t enable_reg)
{
	intel_de_rmw(dev_priv, enable_reg, 0, PLL_ENABLE);

	/* Timeout is actually 600us. */
	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
}

3784 3785 3786 3787
/*
 * Disable CMTG clock gating on ADL-P A0 steppings right after DPLL0 is
 * enabled; a no-op on every other platform/stepping/PLL.
 */
static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
{
	u32 val;

	/* Only ADL-P A0 with DPLL0 is affected. */
	if (!(IS_ALDERLAKE_P(i915) && IS_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) ||
	    pll->info->id != DPLL_ID_ICL_DPLL0)
		return;
	/*
	 * Wa_16011069516:adl-p[a0]
	 *
	 * All CMTG regs are unreliable until CMTG clock gating is disabled,
	 * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
	 * sanity check this assumption with a double read, which presumably
	 * returns the correct value even with clock gating on.
	 *
	 * Instead of the usual place for workarounds we apply this one here,
	 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
	 */
	val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
	/* intel_de_rmw() returns the pre-RMW value — this is the second read. */
	val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING);
	if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
		drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
}

3808 3809 3810
/* Power up, program and lock a combo PHY PLL, applying the ADL-P CMTG WA. */
static void combo_pll_enable(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {

		/*
		 * We need to disable DC states when this DPLL is enabled.
		 * This can be done by taking a reference on DPLL4 power
		 * domain.
		 */
		pll->wakeref = intel_display_power_get(dev_priv,
						       POWER_DOMAIN_DC_OFF);
	}

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	/* Registers must be programmed before PLL_ENABLE is set. */
	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	adlp_cmtg_clock_gating_wa(dev_priv, pll);

	/* DVFS post sequence would be here. See the comment above. */
}

3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859
/* Power up, program and lock the TBT PLL via the fixed TBT_PLL_ENABLE register. */
static void tbt_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);

	/* Registers must be programmed before PLL_ENABLE is set. */
	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);

	/* DVFS post sequence would be here. See the comment above. */
}

3860 3861 3862
/* Power up, program and lock a Type-C PHY PLL (Dekel on TGL+, MG before). */
static void mg_pll_enable(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	/* Registers must be programmed before PLL_ENABLE is set. */
	if (DISPLAY_VER(dev_priv) >= 12)
		dkl_pll_write(dev_priv, pll);
	else
		icl_mg_pll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}

3883
/* Disable and power down a PLL via @enable_reg, waiting out each step. */
static void icl_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	intel_de_rmw(dev_priv, enable_reg, PLL_ENABLE, 0);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	intel_de_rmw(dev_priv, enable_reg, PLL_POWER_ENABLE, 0);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}

static void combo_pll_disable(struct drm_i915_private *dev_priv,
			      struct intel_shared_dpll *pll)
{
3917
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3918

3919
	icl_pll_disable(dev_priv, pll, enable_reg);
3920

3921
	if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
3922
	    pll->info->id == DPLL_ID_EHL_DPLL4)
3923
		intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF,
3924
					pll->wakeref);
3925
}
3926

3927 3928 3929 3930
static void tbt_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
3931 3932 3933 3934 3935
}

static void mg_pll_disable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
3936
	i915_reg_t enable_reg = intel_tc_pll_enable_reg(dev_priv, pll);
3937 3938

	icl_pll_disable(dev_priv, pll, enable_reg);
3939 3940
}

3941 3942 3943
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
3944
	i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
3945 3946
}

3947
static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3948
			      const struct intel_dpll_hw_state *hw_state)
3949
{
3950
	drm_dbg_kms(&dev_priv->drm,
3951
		    "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, div0: 0x%x, "
3952 3953 3954 3955 3956 3957
		    "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
		    "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
		    "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
		    "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
		    "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
		    hw_state->cfgcr0, hw_state->cfgcr1,
3958
		    hw_state->div0,
3959 3960 3961 3962 3963 3964 3965 3966 3967 3968
		    hw_state->mg_refclkin_ctl,
		    hw_state->mg_clktop2_coreclkctl1,
		    hw_state->mg_clktop2_hsclkctl,
		    hw_state->mg_pll_div0,
		    hw_state->mg_pll_div1,
		    hw_state->mg_pll_lf,
		    hw_state->mg_pll_frac_lock,
		    hw_state->mg_pll_ssc,
		    hw_state->mg_pll_bias,
		    hw_state->mg_pll_tdc_coldst_bias);
3969 3970
}

3971
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
3972
	.enable = combo_pll_enable,
3973
	.disable = combo_pll_disable,
3974
	.get_hw_state = combo_pll_get_hw_state,
3975
	.get_freq = icl_ddi_combo_pll_get_freq,
3976 3977 3978 3979 3980 3981
};

static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
3982
	.get_freq = icl_ddi_tbt_pll_get_freq,
3983 3984
};

3985
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
3986
	.enable = mg_pll_enable,
3987
	.disable = mg_pll_disable,
3988
	.get_hw_state = mg_pll_get_hw_state,
3989
	.get_freq = icl_ddi_mg_pll_get_freq,
3990 3991
};

3992
static const struct dpll_info icl_plls[] = {
3993 3994 3995
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
3996 3997 3998 3999
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
4000 4001 4002 4003 4004
	{ },
};

static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
4005
	.compute_dplls = icl_compute_dplls,
4006
	.get_dplls = icl_get_dplls,
4007
	.put_dplls = icl_put_dplls,
4008
	.update_active_dpll = icl_update_active_dpll,
4009
	.update_ref_clks = icl_update_dpll_ref_clks,
4010 4011 4012
	.dump_hw_state = icl_dump_hw_state,
};

L
Lucas De Marchi 已提交
4013 4014 4015
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
4016
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
L
Lucas De Marchi 已提交
4017 4018 4019 4020 4021
	{ },
};

static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
4022
	.compute_dplls = icl_compute_dplls,
4023
	.get_dplls = icl_get_dplls,
4024
	.put_dplls = icl_put_dplls,
4025
	.update_ref_clks = icl_update_dpll_ref_clks,
L
Lucas De Marchi 已提交
4026 4027 4028
	.dump_hw_state = icl_dump_hw_state,
};

4029 4030 4031 4032
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
4033
	.get_freq = icl_ddi_mg_pll_get_freq,
4034 4035
};

V
Vandita Kulkarni 已提交
4036 4037 4038 4039
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
4040 4041 4042 4043 4044 4045
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
V
Vandita Kulkarni 已提交
4046 4047 4048 4049 4050
	{ },
};

static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
4051
	.compute_dplls = icl_compute_dplls,
V
Vandita Kulkarni 已提交
4052 4053
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
4054
	.update_active_dpll = icl_update_active_dpll,
4055
	.update_ref_clks = icl_update_dpll_ref_clks,
V
Vandita Kulkarni 已提交
4056 4057 4058
	.dump_hw_state = icl_dump_hw_state,
};

M
Matt Roper 已提交
4059 4060 4061 4062 4063 4064 4065 4066 4067
static const struct dpll_info rkl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
4068
	.compute_dplls = icl_compute_dplls,
M
Matt Roper 已提交
4069 4070 4071 4072 4073 4074
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};

4075 4076 4077 4078 4079 4080 4081 4082 4083 4084
static const struct dpll_info dg1_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
4085
	.compute_dplls = icl_compute_dplls,
4086 4087 4088 4089 4090 4091
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};

4092 4093 4094 4095 4096 4097 4098 4099 4100 4101
static const struct dpll_info adls_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	{ },
};

static const struct intel_dpll_mgr adls_pll_mgr = {
	.dpll_info = adls_plls,
4102
	.compute_dplls = icl_compute_dplls,
4103 4104 4105 4106 4107 4108
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};

4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121
static const struct dpll_info adlp_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr adlp_pll_mgr = {
	.dpll_info = adlp_plls,
4122
	.compute_dplls = icl_compute_dplls,
4123 4124 4125 4126 4127 4128 4129
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};

4130 4131
/**
 * intel_shared_dpll_init - Initialize shared DPLLs
4132
 * @dev_priv: i915 device
4133
 *
4134
 * Initialize shared DPLLs for @dev_priv.
4135
 */
4136
void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
4137
{
4138 4139
	const struct intel_dpll_mgr *dpll_mgr = NULL;
	const struct dpll_info *dpll_info;
4140
	int i;
4141

4142 4143
	mutex_init(&dev_priv->display.dpll.lock);

4144
	if (DISPLAY_VER(dev_priv) >= 14 || IS_DG2(dev_priv))
4145 4146 4147
		/* No shared DPLLs on DG2; port PLLs are part of the PHY */
		dpll_mgr = NULL;
	else if (IS_ALDERLAKE_P(dev_priv))
4148 4149
		dpll_mgr = &adlp_pll_mgr;
	else if (IS_ALDERLAKE_S(dev_priv))
4150 4151
		dpll_mgr = &adls_pll_mgr;
	else if (IS_DG1(dev_priv))
4152 4153
		dpll_mgr = &dg1_pll_mgr;
	else if (IS_ROCKETLAKE(dev_priv))
M
Matt Roper 已提交
4154
		dpll_mgr = &rkl_pll_mgr;
4155
	else if (DISPLAY_VER(dev_priv) >= 12)
V
Vandita Kulkarni 已提交
4156
		dpll_mgr = &tgl_pll_mgr;
4157
	else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
L
Lucas De Marchi 已提交
4158
		dpll_mgr = &ehl_pll_mgr;
4159
	else if (DISPLAY_VER(dev_priv) >= 11)
4160
		dpll_mgr = &icl_pll_mgr;
4161
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
4162
		dpll_mgr = &bxt_pll_mgr;
4163
	else if (DISPLAY_VER(dev_priv) == 9)
4164
		dpll_mgr = &skl_pll_mgr;
4165
	else if (HAS_DDI(dev_priv))
4166
		dpll_mgr = &hsw_pll_mgr;
4167
	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4168
		dpll_mgr = &pch_pll_mgr;
4169

4170
	if (!dpll_mgr) {
4171
		dev_priv->display.dpll.num_shared_dpll = 0;
4172 4173 4174
		return;
	}

4175 4176
	dpll_info = dpll_mgr->dpll_info;

4177
	for (i = 0; dpll_info[i].name; i++) {
4178 4179 4180 4181
		if (drm_WARN_ON(&dev_priv->drm,
				i >= ARRAY_SIZE(dev_priv->display.dpll.shared_dplls)))
			break;

4182
		drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id);
4183
		dev_priv->display.dpll.shared_dplls[i].info = &dpll_info[i];
4184 4185
	}

4186 4187
	dev_priv->display.dpll.mgr = dpll_mgr;
	dev_priv->display.dpll.num_shared_dpll = i;
4188
}
4189

4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208
/**
 * intel_compute_shared_dplls - compute DPLL state CRTC and encoder combination
 * @state: atomic state
 * @crtc: CRTC to compute DPLLs for
 * @encoder: encoder
 *
 * This function computes the DPLL state for the given CRTC and encoder.
 *
 * The new configuration in the atomic commit @state is made effective by
 * calling intel_shared_dpll_swap_state().
 *
 * Returns:
 * 0 on success, negative error code on falure.
 */
int intel_compute_shared_dplls(struct intel_atomic_state *state,
			       struct intel_crtc *crtc,
			       struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4209
	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4210 4211 4212 4213 4214 4215 4216

	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
		return -EINVAL;

	return dpll_mgr->compute_dplls(state, crtc, encoder);
}

4217
/**
4218 4219 4220
 * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
 * @state: atomic state
 * @crtc: CRTC to reserve DPLLs for
4221 4222
 * @encoder: encoder
 *
4223 4224 4225 4226 4227 4228 4229 4230 4231
 * This function reserves all required DPLLs for the given CRTC and encoder
 * combination in the current atomic commit @state and the new @crtc atomic
 * state.
 *
 * The new configuration in the atomic commit @state is made effective by
 * calling intel_shared_dpll_swap_state().
 *
 * The reserved DPLLs should be released by calling
 * intel_release_shared_dplls().
4232 4233
 *
 * Returns:
4234 4235
 * 0 if all required DPLLs were successfully reserved,
 * negative error code otherwise.
4236
 */
4237 4238 4239
int intel_reserve_shared_dplls(struct intel_atomic_state *state,
			       struct intel_crtc *crtc,
			       struct intel_encoder *encoder)
4240
{
4241
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4242
	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4243

4244
	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4245
		return -EINVAL;
4246

4247
	return dpll_mgr->get_dplls(state, crtc, encoder);
4248
}
4249 4250

/**
4251
 * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4252
 * @state: atomic state
4253
 * @crtc: crtc from which the DPLLs are to be released
4254
 *
4255 4256 4257 4258 4259
 * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
 * from the current atomic commit @state and the old @crtc atomic state.
 *
 * The new configuration in the atomic commit @state is made effective by
 * calling intel_shared_dpll_swap_state().
4260
 */
4261 4262
void intel_release_shared_dplls(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
4263
{
4264
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4265
	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4266 4267 4268 4269 4270 4271 4272 4273 4274

	/*
	 * FIXME: this function is called for every platform having a
	 * compute_clock hook, even though the platform doesn't yet support
	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
	 * called on those.
	 */
	if (!dpll_mgr)
		return;
4275

4276
	dpll_mgr->put_dplls(state, crtc);
4277
}
4278

4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293
/**
 * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
 * @state: atomic state
 * @crtc: the CRTC for which to update the active DPLL
 * @encoder: encoder determining the type of port DPLL
 *
 * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
 * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
 * DPLL selected will be based on the current mode of the encoder's port.
 */
void intel_update_active_dpll(struct intel_atomic_state *state,
			      struct intel_crtc *crtc,
			      struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4294
	const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
4295

4296
	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4297 4298 4299 4300 4301
		return;

	dpll_mgr->update_active_dpll(state, crtc, encoder);
}

4302 4303 4304 4305
/**
 * intel_dpll_get_freq - calculate the DPLL's output frequency
 * @i915: i915 device
 * @pll: DPLL for which to calculate the output frequency
4306
 * @pll_state: DPLL state from which to calculate the output frequency
4307
 *
4308
 * Return the output frequency corresponding to @pll's passed in @pll_state.
4309
 */
4310
int intel_dpll_get_freq(struct drm_i915_private *i915,
4311 4312
			const struct intel_shared_dpll *pll,
			const struct intel_dpll_hw_state *pll_state)
4313
{
4314 4315
	if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
		return 0;
4316

4317
	return pll->info->funcs->get_freq(i915, pll, pll_state);
4318 4319
}

4320 4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332
/**
 * intel_dpll_get_hw_state - readout the DPLL's hardware state
 * @i915: i915 device
 * @pll: DPLL for which to calculate the output frequency
 * @hw_state: DPLL's hardware state
 *
 * Read out @pll's hardware state into @hw_state.
 */
bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state)
{
	return pll->info->funcs->get_hw_state(i915, pll, hw_state);
4333 4334
}

4335 4336 4337 4338 4339
static void readout_dpll_hw_state(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

4340
	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
4341

4342 4343
	if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
	    pll->on &&
4344 4345
	    pll->info->id == DPLL_ID_EHL_DPLL4) {
		pll->wakeref = intel_display_power_get(i915,
4346
						       POWER_DOMAIN_DC_OFF);
4347 4348
	}

4349
	pll->state.pipe_mask = 0;
4350 4351 4352 4353 4354
	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
4355
			intel_reference_shared_dpll_crtc(crtc, pll, &pll->state);
4356
	}
4357
	pll->active_mask = pll->state.pipe_mask;
4358 4359

	drm_dbg_kms(&i915->drm,
4360 4361
		    "%s hw state readout: pipe_mask 0x%x, on %i\n",
		    pll->info->name, pll->state.pipe_mask, pll->on);
4362 4363
}

4364
void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
4365
{
4366 4367
	if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
		i915->display.dpll.mgr->update_ref_clks(i915);
4368 4369 4370 4371 4372
}

void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
{
	int i;
4373

4374 4375
	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
		readout_dpll_hw_state(i915, &i915->display.dpll.shared_dplls[i]);
4376 4377 4378 4379 4380
}

/*
 * Bring a PLL's hardware state in line with software expectations after
 * readout: apply the ADL-P CMTG workaround and disable PLLs that are on
 * but not referenced by any active pipe.
 */
static void sanitize_dpll_state(struct drm_i915_private *i915,
				struct intel_shared_dpll *pll)
{
	if (!pll->on)
		return;

	adlp_cmtg_clock_gating_wa(i915, pll);

	if (pll->active_mask)
		return;

	drm_dbg_kms(&i915->drm,
		    "%s enabled but not in use, disabling\n",
		    pll->info->name);

	pll->info->funcs->disable(i915, pll);
	pll->on = false;
}

void intel_dpll_sanitize_state(struct drm_i915_private *i915)
{
	int i;

4401 4402
	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
		sanitize_dpll_state(i915, &i915->display.dpll.shared_dplls[i]);
4403 4404
}

4405
/**
4406
 * intel_dpll_dump_hw_state - write hw_state to dmesg
4407 4408 4409
 * @dev_priv: i915 drm device
 * @hw_state: hw state to be written to the log
 *
4410
 * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4411 4412
 */
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4413
			      const struct intel_dpll_hw_state *hw_state)
4414
{
4415 4416
	if (dev_priv->display.dpll.mgr) {
		dev_priv->display.dpll.mgr->dump_hw_state(dev_priv, hw_state);
4417 4418 4419 4420
	} else {
		/* fallback for platforms that don't use the shared dpll
		 * infrastructure
		 */
4421 4422 4423 4424 4425 4426 4427
		drm_dbg_kms(&dev_priv->drm,
			    "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
			    "fp0: 0x%x, fp1: 0x%x\n",
			    hw_state->dpll,
			    hw_state->dpll_md,
			    hw_state->fp0,
			    hw_state->fp1);
4428 4429
	}
}
4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447

static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	u8 pipe_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);

	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
4448
		I915_STATE_WARN(dev_priv, !pll->on && pll->active_mask,
4449
				"pll in active use but not on in sw tracking\n");
4450
		I915_STATE_WARN(dev_priv, pll->on && !pll->active_mask,
4451
				"pll is on but not used by any active pipe\n");
4452
		I915_STATE_WARN(dev_priv, pll->on != active,
4453 4454 4455 4456 4457
				"pll on state mismatch (expected %i, found %i)\n",
				pll->on, active);
	}

	if (!crtc) {
4458 4459
		I915_STATE_WARN(dev_priv,
				pll->active_mask & ~pll->state.pipe_mask,
4460 4461 4462 4463 4464 4465 4466 4467 4468
				"more active pll users than references: 0x%x vs 0x%x\n",
				pll->active_mask, pll->state.pipe_mask);

		return;
	}

	pipe_mask = BIT(crtc->pipe);

	if (new_crtc_state->hw.active)
4469
		I915_STATE_WARN(dev_priv, !(pll->active_mask & pipe_mask),
4470 4471 4472
				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
4473
		I915_STATE_WARN(dev_priv, pll->active_mask & pipe_mask,
4474 4475 4476
				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

4477
	I915_STATE_WARN(dev_priv, !(pll->state.pipe_mask & pipe_mask),
4478 4479 4480
			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
			pipe_mask, pll->state.pipe_mask);

4481 4482
	I915_STATE_WARN(dev_priv,
			pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}

void intel_shared_dpll_state_verify(struct intel_crtc *crtc,
				    struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (new_crtc_state->shared_dpll)
		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll,
					 crtc, new_crtc_state);

	if (old_crtc_state->shared_dpll &&
	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
		u8 pipe_mask = BIT(crtc->pipe);
		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;

4502
		I915_STATE_WARN(dev_priv, pll->active_mask & pipe_mask,
4503 4504
				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
				pipe_name(crtc->pipe), pll->active_mask);
4505
		I915_STATE_WARN(dev_priv, pll->state.pipe_mask & pipe_mask,
4506 4507 4508 4509 4510 4511 4512 4513 4514
				"pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
				pipe_name(crtc->pipe), pll->state.pipe_mask);
	}
}

void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915)
{
	int i;

4515 4516
	for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
		verify_single_dpll_state(i915, &i915->display.dpll.shared_dplls[i],
4517 4518
					 NULL, NULL);
}