/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"

static void
intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
{
	DRM_DEBUG_KMS("ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x",
		      link_status[0], link_status[1], link_status[2],
		      link_status[3], link_status[4], link_status[5]);
}
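
/*
 * Illustrative note: the six bytes dumped above are the contiguous DPCD
 * link status registers starting at DP_LANE0_1_STATUS (0x202):
 *
 *	link_status[0]	0x202	LANE0_1_STATUS
 *	link_status[1]	0x203	LANE2_3_STATUS
 *	link_status[2]	0x204	LANE_ALIGN_STATUS_UPDATED
 *	link_status[3]	0x205	SINK_STATUS
 *	link_status[4]	0x206	ADJUST_REQUEST_LANE0_1
 *	link_status[5]	0x207	ADJUST_REQUEST_LANE2_3
 */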

static int intel_dp_lttpr_count(struct intel_dp *intel_dp)
{
	int count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);

	/*
	 * Pretend no LTTPRs in case of LTTPR detection error, or
	 * if too many (>8) LTTPRs are detected. This translates to link
	 * training in transparent mode.
	 */
	return count <= 0 ? 0 : count;
}
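
/*
 * Illustrative note: DP_PHY_REPEATER_CNT (DPCD 0xf0002) encodes the LTTPR
 * count as a single set bit, decoded by drm_dp_lttpr_count():
 *
 *	0x80 -> 1 LTTPR, 0x40 -> 2 LTTPRs, ..., 0x01 -> 8 LTTPRs
 *
 * Any other value (zero or multiple bits set) makes drm_dp_lttpr_count()
 * return an error, which the helper above maps to 0.
 */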

static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
{
	intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
				    DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = 0;
}

static const char *intel_dp_phy_name(enum drm_dp_phy dp_phy,
				     char *buf, size_t buf_size)
{
	if (dp_phy == DP_PHY_DPRX)
		snprintf(buf, buf_size, "DPRX");
	else
		snprintf(buf, buf_size, "LTTPR %d", dp_phy - DP_PHY_LTTPR1 + 1);

	return buf;
}

static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp,
				   enum drm_dp_phy dp_phy)
{
	return intel_dp->lttpr_phy_caps[dp_phy - DP_PHY_LTTPR1];
}

static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
					 enum drm_dp_phy dp_phy)
{
	u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
	char phy_name[10];

	intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));

	if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dp_phy, phy_caps) < 0) {
		drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
			    "failed to read the PHY caps for %s\n",
			    phy_name);
		return;
	}

	drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
		    "%s PHY capabilities: %*ph\n",
		    phy_name,
		    (int)sizeof(intel_dp->lttpr_phy_caps[0]),
		    phy_caps);
}
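
/*
 * Note: "%*ph" is the kernel printf extension for hex-dumping a small
 * buffer; e.g. a caps buffer starting with {0x14, 0x80, 0x1e} would be
 * logged as "14 80 1e ..." (values here are illustrative only).
 */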

static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp)
{
	if (drm_dp_read_lttpr_common_caps(&intel_dp->aux,
					  intel_dp->lttpr_common_caps) < 0) {
		memset(intel_dp->lttpr_common_caps, 0,
		       sizeof(intel_dp->lttpr_common_caps));
		return false;
	}

	drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
		    "LTTPR common capabilities: %*ph\n",
		    (int)sizeof(intel_dp->lttpr_common_caps),
		    intel_dp->lttpr_common_caps);

	return true;
}

static bool
intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
{
	u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT :
			  DP_PHY_REPEATER_MODE_NON_TRANSPARENT;

	return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1;
}
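
/*
 * Illustrative sketch of the DPCD access above, using the mode values
 * defined by the DP standard (0x55 transparent, 0xaa non-transparent)
 * written to DP_PHY_REPEATER_MODE (DPCD 0xf0003):
 *
 *	u8 val = DP_PHY_REPEATER_MODE_TRANSPARENT;	(0x55)
 *
 *	drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1);
 */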

/**
 * intel_dp_lttpr_init - detect LTTPRs and init the LTTPR link training mode
 * @intel_dp: Intel DP struct
 *
 * Read the LTTPR common capabilities, switch to non-transparent link training
 * mode if any is detected and read the PHY capabilities for all detected
 * LTTPRs. In case of an LTTPR detection error or if the number of
 * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
 * transparent link training mode.
 *
 * Returns:
 *   >0  if LTTPRs were detected and the non-transparent LT mode was set
 *    0  if no LTTPRs or more than 8 LTTPRs were detected or in case of a
 *       detection failure and the transparent LT mode was set
 */
int intel_dp_lttpr_init(struct intel_dp *intel_dp)
{
	int lttpr_count;
	bool ret;
	int i;

	if (intel_dp_is_edp(intel_dp))
		return 0;

	ret = intel_dp_read_lttpr_common_caps(intel_dp);

	/*
	 * See DP Standard v2.0 3.6.6.1. about the explicit disabling of
	 * non-transparent mode and the disable->enable non-transparent mode
	 * sequence.
	 */
	intel_dp_set_lttpr_transparent_mode(intel_dp, true);

	if (!ret)
		return 0;

	lttpr_count = intel_dp_lttpr_count(intel_dp);

	/*
	 * In case of unsupported number of LTTPRs or failing to switch to
	 * non-transparent mode fall-back to transparent link training mode,
	 * still taking into account any LTTPR common lane count/rate limits.
	 */
	if (lttpr_count == 0)
		return 0;

	if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
		drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
			    "Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n");

		intel_dp_set_lttpr_transparent_mode(intel_dp, true);
		intel_dp_reset_lttpr_count(intel_dp);

		return 0;
	}

	for (i = 0; i < lttpr_count; i++)
		intel_dp_read_lttpr_phy_caps(intel_dp, DP_PHY_LTTPR(i));

	return lttpr_count;
}
EXPORT_SYMBOL(intel_dp_lttpr_init);

static u8 dp_voltage_max(u8 preemph)
{
	switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
	default:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
	}
}
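
/*
 * Illustrative note: dp_voltage_max() encodes the constraint that a lane's
 * voltage swing level and pre-emphasis level sum to at most 3:
 *
 *	pre-emphasis level:	0  1  2  3
 *	max vswing level:	3  2  1  0
 *
 * e.g. with pre-emphasis level 2 requested, the swing is capped at level 1.
 */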

static u8 intel_dp_lttpr_voltage_max(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);

	if (drm_dp_lttpr_voltage_swing_level_3_supported(phy_caps))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}

static u8 intel_dp_lttpr_preemph_max(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);

	if (drm_dp_lttpr_pre_emphasis_level_3_supported(phy_caps))
		return DP_TRAIN_PRE_EMPH_LEVEL_3;
	else
		return DP_TRAIN_PRE_EMPH_LEVEL_2;
}

static bool
intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int lttpr_count = intel_dp_lttpr_count(intel_dp);

	drm_WARN_ON_ONCE(&i915->drm, lttpr_count == 0 && dp_phy != DP_PHY_DPRX);

	return lttpr_count == 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
}
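
/*
 * Illustrative note on the PHY ordering assumed by these helpers:
 * dp_phy + 1 is the next PHY upstream (towards the source), so with e.g.
 * lttpr_count == 2 the link looks like:
 *
 *	source -> LTTPR2 -> LTTPR1 -> DPRX
 *
 * i.e. DP_PHY_LTTPR(lttpr_count - 1) is directly downstream of the source
 * and DP_PHY_LTTPR1 is directly upstream of the DPRX.
 */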

static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
				   const struct intel_crtc_state *crtc_state,
				   enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 voltage_max;

	/*
	 * Get voltage_max from the DPTX_PHY (source or LTTPR) upstream from
	 * the DPRX_PHY we train.
	 */
	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		voltage_max = intel_dp->voltage_max(intel_dp, crtc_state);
	else
		voltage_max = intel_dp_lttpr_voltage_max(intel_dp, dp_phy + 1);

	drm_WARN_ON_ONCE(&i915->drm,
			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);

	return voltage_max;
}

static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
				   enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 preemph_max;

	/*
	 * Get preemph_max from the DPTX_PHY (source or LTTPR) upstream from
	 * the DPRX_PHY we train.
	 */
	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		preemph_max = intel_dp->preemph_max(intel_dp);
	else
		preemph_max = intel_dp_lttpr_preemph_max(intel_dp, dp_phy + 1);

	drm_WARN_ON_ONCE(&i915->drm,
			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);

	return preemph_max;
}

void
intel_dp_get_adjust_train(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  enum drm_dp_phy dp_phy,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	u8 v = 0;
	u8 p = 0;
	int lane;
	u8 voltage_max;
	u8 preemph_max;

	for (lane = 0; lane < crtc_state->lane_count; lane++) {
		v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane));
		p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
	}

	preemph_max = intel_dp_phy_preemph_max(intel_dp, dp_phy);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	v = min(v, dp_voltage_max(p));

	voltage_max = intel_dp_phy_voltage_max(intel_dp, crtc_state, dp_phy);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	for (lane = 0; lane < 4; lane++)
		intel_dp->train_set[lane] = v | p;
}
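
/*
 * Worked example (illustrative): on a 4-lane link where the sink requests
 * vswing levels {1, 2, 1, 0} and pre-emphasis levels {0, 1, 0, 0}, the
 * loop above picks v = level 2 and p = level 1. Assuming preemph_max and
 * voltage_max are both level 3, neither MAX_*_REACHED flag is set;
 * dp_voltage_max() allows vswing up to level 2 at pre-emphasis level 1,
 * so all four train_set entries end up with vswing 2 / pre-emphasis 1
 * (in their register bit positions).
 */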

static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp,
					     enum drm_dp_phy dp_phy)
{
	return dp_phy == DP_PHY_DPRX ?
		DP_TRAINING_PATTERN_SET :
		DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy);
}
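
/*
 * Illustrative mapping: DP_PHY_DPRX selects DP_TRAINING_PATTERN_SET
 * (DPCD 0x102), while e.g. DP_PHY_LTTPR1 selects
 * DP_TRAINING_PATTERN_SET_PHY_REPEATER1 (DPCD 0xf0010), each further
 * LTTPR's register block being offset by 0x50.
 */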

static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			const struct intel_crtc_state *crtc_state,
			enum drm_dp_phy dp_phy,
			u8 dp_train_pat)
{
	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
	u8 buf[sizeof(intel_dp->train_set) + 1];
	int len;

	intel_dp_program_link_training_pattern(intel_dp, crtc_state,
					       dp_train_pat);

	buf[0] = dp_train_pat;
	/* The DP_TRAINING_LANEx_SET registers follow DP_TRAINING_PATTERN_SET */
	memcpy(buf + 1, intel_dp->train_set, crtc_state->lane_count);
	len = crtc_state->lane_count + 1;

	return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len;
}
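
/*
 * Illustrative note: the single AUX write above relies on the per-lane
 * drive setting registers directly following the pattern register in the
 * DPCD, e.g. for the DPRX on a 4-lane link:
 *
 *	buf[0] -> 0x102 TRAINING_PATTERN_SET
 *	buf[1] -> 0x103 TRAINING_LANE0_SET
 *	...
 *	buf[4] -> 0x106 TRAINING_LANE3_SET
 */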

void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];

	drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
		    train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
		    train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "");
	drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n",
		    (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
		    DP_TRAIN_PRE_EMPHASIS_SHIFT,
		    train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
		    " (max)" : "");

	intel_dp->set_signal_levels(intel_dp, crtc_state);
}

static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  enum drm_dp_phy dp_phy,
			  u8 dp_train_pat)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, crtc_state);
	return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat);
}

static bool
intel_dp_update_link_train(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *crtc_state,
			   enum drm_dp_phy dp_phy)
{
	int reg = dp_phy == DP_PHY_DPRX ?
			    DP_TRAINING_LANE0_SET :
			    DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
	int ret;

	intel_dp_set_signal_levels(intel_dp, crtc_state);

	ret = drm_dp_dpcd_write(&intel_dp->aux, reg,
				intel_dp->train_set, crtc_state->lane_count);

	return ret == crtc_state->lane_count;
}

static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp,
					     const struct intel_crtc_state *crtc_state)
{
	int lane;

	for (lane = 0; lane < crtc_state->lane_count; lane++)
		if ((intel_dp->train_set[lane] &
		     DP_TRAIN_MAX_SWING_REACHED) == 0)
			return false;

	return true;
}

/*
 * Prepare link training by configuring the link parameters. On DDI platforms
 * also enable the port here.
 */
static bool
intel_dp_prepare_link_train(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 link_config[2];
	u8 link_bw, rate_select;

	if (intel_dp->prepare_link_retrain)
		intel_dp->prepare_link_retrain(intel_dp, crtc_state);

	intel_dp_compute_rate(intel_dp, crtc_state->port_clock,
			      &link_bw, &rate_select);

	if (link_bw)
		drm_dbg_kms(&i915->drm,
			    "Using LINK_BW_SET value %02x\n", link_bw);
	else
		drm_dbg_kms(&i915->drm,
			    "Using LINK_RATE_SET value %02x\n", rate_select);

	/* Write the link configuration data */
	link_config[0] = link_bw;
	link_config[1] = crtc_state->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);

	/* eDP 1.4 rate select method. */
	if (!link_bw)
		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
				  &rate_select, 1);

	link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

	intel_dp->DP |= DP_PORT_EN;

	return true;
}
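
/*
 * Illustrative example of the link configuration written above for a
 * 5.4 Gbps (HBR2), 4-lane link on a sink with enhanced framing support:
 *
 *	link_config[0] = DP_LINK_BW_5_4;	(0x14, to DPCD 0x100)
 *	link_config[1] = 4 | DP_LANE_COUNT_ENHANCED_FRAME_EN;	(0x84, to DPCD 0x101)
 */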

static void intel_dp_link_training_clock_recovery_delay(struct intel_dp *intel_dp,
							enum drm_dp_phy dp_phy)
{
	if (dp_phy == DP_PHY_DPRX)
		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
	else
		drm_dp_lttpr_link_train_clock_recovery_delay();
}

/*
 * Perform the link training clock recovery phase on the given DP PHY using
 * training pattern 1.
 */
static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
				      const struct intel_crtc_state *crtc_state,
				      enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 voltage;
	int voltage_tries, cr_tries, max_cr_tries;
	bool max_vswing_reached = false;

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		drm_err(&i915->drm, "failed to enable link training\n");
		return false;
	}

	/*
	 * The DP 1.4 spec defines the max clock recovery retries value
	 * as 10 but for pre-DP 1.4 devices we set a very tolerant
	 * retry limit of 80 (4 voltage levels x 4 preemphasis levels x
	 * 5 identical voltage retries). Since the previous specs didn't
	 * define a limit and created the possibility of an infinite loop
	 * we want to prevent any sink from triggering that corner case.
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
		max_cr_tries = 10;
	else
		max_cr_tries = 80;

	voltage_tries = 1;
	for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
		u8 link_status[DP_LINK_STATUS_SIZE];

		intel_dp_link_training_clock_recovery_delay(intel_dp, dp_phy);

		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
						     link_status) < 0) {
			drm_err(&i915->drm, "failed to get link status\n");
			return false;
		}

		if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) {
			drm_dbg_kms(&i915->drm, "clock recovery OK\n");
			return true;
		}

		if (voltage_tries == 5) {
			drm_dbg_kms(&i915->drm,
				    "Same voltage tried 5 times\n");
			return false;
		}

		if (max_vswing_reached) {
			drm_dbg_kms(&i915->drm, "Max Voltage Swing reached\n");
			return false;
		}

		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
					  link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
			drm_err(&i915->drm,
				"failed to update link training\n");
			return false;
		}

		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
		    voltage)
			++voltage_tries;
		else
			voltage_tries = 1;

		if (intel_dp_link_max_vswing_reached(intel_dp, crtc_state))
			max_vswing_reached = true;
	}

	drm_err(&i915->drm,
		"Failed clock recovery %d times, giving up!\n", max_cr_tries);
	return false;
}

/*
 * Pick training pattern for channel equalization. Training pattern 4 for HBR3
 * or for 1.4 devices that support it, training pattern 3 for HBR2 or
 * 1.2 devices that support it, training pattern 2 otherwise.
 */
static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
				     const struct intel_crtc_state *crtc_state,
				     enum drm_dp_phy dp_phy)
{
	bool source_tps3, sink_tps3, source_tps4, sink_tps4;

	/*
	 * Intel platforms that support HBR3 also support TPS4. It is mandatory
	 * for all downstream devices that support HBR3. There are no known eDP
	 * panels that support TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1
	 * specification.
	 * LTTPRs must support TPS4.
	 */
	source_tps4 = intel_dp_source_supports_hbr3(intel_dp);
	sink_tps4 = dp_phy != DP_PHY_DPRX ||
		    drm_dp_tps4_supported(intel_dp->dpcd);
	if (source_tps4 && sink_tps4) {
		return DP_TRAINING_PATTERN_4;
	} else if (crtc_state->port_clock == 810000) {
		if (!source_tps4)
			drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
				    "8.1 Gbps link rate without source HBR3/TPS4 support\n");
		if (!sink_tps4)
			drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
				    "8.1 Gbps link rate without sink TPS4 support\n");
	}
	/*
	 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
	 * also mandatory for downstream devices that support HBR2. However, not
	 * all sinks follow the spec.
	 */
	source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
	sink_tps3 = dp_phy != DP_PHY_DPRX ||
		    drm_dp_tps3_supported(intel_dp->dpcd);
	if (source_tps3 && sink_tps3) {
		return DP_TRAINING_PATTERN_3;
	} else if (crtc_state->port_clock >= 540000) {
		if (!source_tps3)
			drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
				    ">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n");
		if (!sink_tps3)
			drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
				    ">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
	}

	return DP_TRAINING_PATTERN_2;
}

static void
intel_dp_link_training_channel_equalization_delay(struct intel_dp *intel_dp,
						  enum drm_dp_phy dp_phy)
{
	if (dp_phy == DP_PHY_DPRX) {
		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
	} else {
		const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);

		drm_dp_lttpr_link_train_channel_eq_delay(phy_caps);
	}
}

/*
 * Perform the link training channel equalization phase on the given DP PHY
 * using one of training pattern 2, 3 or 4 depending on the source and
 * sink capabilities.
 */
static bool
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
					    const struct intel_crtc_state *crtc_state,
					    enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int tries;
	u32 training_pattern;
	u8 link_status[DP_LINK_STATUS_SIZE];
	bool channel_eq = false;

	training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy);
	/* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
	if (training_pattern != DP_TRAINING_PATTERN_4)
		training_pattern |= DP_LINK_SCRAMBLING_DISABLE;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy,
				     training_pattern)) {
		drm_err(&i915->drm, "failed to start channel equalization\n");
		return false;
	}

	for (tries = 0; tries < 5; tries++) {
		intel_dp_link_training_channel_equalization_delay(intel_dp,
								  dp_phy);
		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
						     link_status) < 0) {
			drm_err(&i915->drm,
				"failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status,
					      crtc_state->lane_count)) {
			intel_dp_dump_link_status(link_status);
			drm_dbg_kms(&i915->drm,
				    "Clock recovery check failed, cannot "
				    "continue channel equalization\n");
			break;
		}

		if (drm_dp_channel_eq_ok(link_status,
					 crtc_state->lane_count)) {
			channel_eq = true;
			drm_dbg_kms(&i915->drm, "Channel EQ done. DP Training "
				    "successful\n");
			break;
		}

		/* Update training set as requested by target */
		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
					  link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
			drm_err(&i915->drm,
				"failed to update link training\n");
			break;
		}
	}

	/* Try 5 times, else fail and try at lower BW */
	if (tries == 5) {
		intel_dp_dump_link_status(link_status);
		drm_dbg_kms(&i915->drm,
			    "Channel equalization failed 5 times\n");
	}

	return channel_eq;
}

static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
						   enum drm_dp_phy dp_phy)
{
	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
	u8 val = DP_TRAINING_PATTERN_DISABLE;

	return drm_dp_dpcd_write(&intel_dp->aux, reg, &val, 1) == 1;
}

/**
 * intel_dp_stop_link_train - stop link training
 * @intel_dp: DP struct
 * @crtc_state: state for CRTC attached to the encoder
 *
 * Stop the link training of the @intel_dp port, disabling the test pattern
 * symbol generation on the port and disabling the training pattern in
 * the sink's DPCD.
 *
 * What symbols are output on the port after this point is
 * platform specific: On DDI/VLV/CHV platforms it will be the idle pattern
 * with the pipe being disabled, on older platforms it's HW specific if/how an
 * idle pattern is generated, as the pipe is already enabled here for those.
 *
 * This function must be called after intel_dp_start_link_train().
 */
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	intel_dp->link_trained = true;

	intel_dp_program_link_training_pattern(intel_dp,
					       crtc_state,
					       DP_TRAINING_PATTERN_DISABLE);
	intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
}

static bool
intel_dp_link_train_phy(struct intel_dp *intel_dp,
			const struct intel_crtc_state *crtc_state,
			enum drm_dp_phy dp_phy)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	char phy_name[10];
	bool ret = false;

	if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy))
		goto out;

	if (!intel_dp_link_training_channel_equalization(intel_dp, crtc_state, dp_phy))
		goto out;

	ret = true;

out:
	drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
		    "[CONNECTOR:%d:%s] Link Training %s at link rate = %d, lane count = %d, at %s",
		    intel_connector->base.base.id,
		    intel_connector->base.name,
		    ret ? "passed" : "failed",
		    crtc_state->port_clock, crtc_state->lane_count,
		    intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));

	return ret;
}

static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
						     const struct intel_crtc_state *crtc_state)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	if (intel_dp->hobl_active) {
		drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
			    "Link Training failed with HOBL active, not enabling it from now on");
		intel_dp->hobl_failed = true;
	} else if (intel_dp_get_link_train_fallback_values(intel_dp,
							   crtc_state->port_clock,
							   crtc_state->lane_count)) {
		return;
	}

	/* Schedule a Hotplug Uevent to userspace to start modeset */
	schedule_work(&intel_connector->modeset_retry_work);
}

/* Perform the link training on all LTTPRs and the DPRX on a link. */
static bool
intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
			     const struct intel_crtc_state *crtc_state,
			     int lttpr_count)
{
	bool ret = true;
	int i;

	intel_dp_prepare_link_train(intel_dp, crtc_state);

	for (i = lttpr_count - 1; i >= 0; i--) {
		enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i);

		ret = intel_dp_link_train_phy(intel_dp, crtc_state, dp_phy);
		intel_dp_disable_dpcd_training_pattern(intel_dp, dp_phy);

		if (!ret)
			break;
	}

	if (ret)
		intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);

	if (intel_dp->set_idle_link_train)
		intel_dp->set_idle_link_train(intel_dp, crtc_state);

	return ret;
}
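
/*
 * Illustrative sequencing note: with e.g. lttpr_count == 2 the loop above
 * trains the link segments starting from the PHY closest to the source,
 *
 *	LTTPR2 -> LTTPR1 -> DPRX
 *
 * disabling each LTTPR's training pattern before moving on to the next
 * (downstream) segment.
 */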

/**
 * intel_dp_start_link_train - start link training
 * @intel_dp: DP struct
 * @crtc_state: state for CRTC attached to the encoder
 *
 * Start the link training of the @intel_dp port, scheduling a fallback
 * retraining with reduced link rate/lane parameters if the link training
 * fails.
 * After calling this function intel_dp_stop_link_train() must be called.
 */
void intel_dp_start_link_train(struct intel_dp *intel_dp,
			       const struct intel_crtc_state *crtc_state)
{
	/*
	 * TODO: Reiniting LTTPRs here won't be needed once proper connector
	 * HW state readout is added.
	 */
	int lttpr_count = intel_dp_lttpr_init(intel_dp);

	if (!intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count))
		intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);
}