/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"

/* Log the first 6 raw DPCD link status bytes for debugging. */
static void
intel_dp_dump_link_status(struct drm_device *drm,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	drm_dbg_kms(drm,
		    "ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
		    link_status[0], link_status[1], link_status[2],
		    link_status[3], link_status[4], link_status[5]);
}

38 39
/* Clear all cached LTTPR common capabilities, i.e. "no LTTPRs detected". */
static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
{
	memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps));
}

43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87
/*
 * Zero only the cached PHY_REPEATER_CNT field, keeping the rest of the
 * cached common caps (e.g. the common rate/lane limits) intact.
 */
static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
{
	intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
				    DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = 0;
}

/*
 * Format a human readable name for @dp_phy into @buf: "DPRX" for the sink
 * itself, "LTTPR <n>" (1-based) for a PHY repeater. Returns @buf.
 */
static const char *intel_dp_phy_name(enum drm_dp_phy dp_phy,
				     char *buf, size_t buf_size)
{
	if (dp_phy == DP_PHY_DPRX) {
		snprintf(buf, buf_size, "DPRX");
		return buf;
	}

	snprintf(buf, buf_size, "LTTPR %d", dp_phy - DP_PHY_LTTPR1 + 1);

	return buf;
}

/* Return the cached per-PHY capability buffer for the given LTTPR PHY. */
static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp,
				   enum drm_dp_phy dp_phy)
{
	return intel_dp->lttpr_phy_caps[dp_phy - DP_PHY_LTTPR1];
}

/*
 * Read and cache the DPCD capabilities of the given LTTPR PHY. A failed
 * read is only logged; the cached caps are left as they were.
 */
static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
					 enum drm_dp_phy dp_phy)
{
	u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
	char phy_name[10];

	intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));

	if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dp_phy, phy_caps) < 0) {
		drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
			    "failed to read the PHY caps for %s\n",
			    phy_name);
		return;
	}

	drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
		    "%s PHY capabilities: %*ph\n",
		    phy_name,
		    (int)sizeof(intel_dp->lttpr_phy_caps[0]),
		    phy_caps);
}

88 89
/*
 * Read and cache the LTTPR common capabilities. Returns true if valid caps
 * (data structure rev >= 1.4) were read; on any failure the cached caps are
 * cleared and false is returned. LTTPR detection is skipped entirely on eDP
 * and on platforms with a too short AUX timeout.
 */
static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (intel_dp_is_edp(intel_dp))
		return false;

	/*
	 * Detecting LTTPRs must be avoided on platforms with an AUX timeout
	 * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
	 */
	if (DISPLAY_VER(i915) < 10 || IS_GEMINILAKE(i915))
		return false;

	if (drm_dp_read_lttpr_common_caps(&intel_dp->aux,
					  intel_dp->lttpr_common_caps) < 0)
		goto reset_caps;

	drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
		    "LTTPR common capabilities: %*ph\n",
		    (int)sizeof(intel_dp->lttpr_common_caps),
		    intel_dp->lttpr_common_caps);

	/* The minimum value of LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV is 1.4 */
	if (intel_dp->lttpr_common_caps[0] < 0x14)
		goto reset_caps;

	return true;

reset_caps:
	intel_dp_reset_lttpr_common_caps(intel_dp);
	return false;
}

/*
 * Select between the transparent and non-transparent LTTPR link training
 * modes via the DP_PHY_REPEATER_MODE DPCD register.
 * Returns true if the one mode byte was written successfully.
 */
static bool
intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
{
	u8 mode;

	if (enable)
		mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
	else
		mode = DP_PHY_REPEATER_MODE_NON_TRANSPARENT;

	return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &mode, 1) == 1;
}

131
/*
 * Detect LTTPRs and, if any are present, switch them to non-transparent
 * link training mode, falling back to transparent mode on any failure.
 *
 * Returns the number of LTTPRs detected and put in non-transparent mode,
 * or 0 if none were detected / the fallback to transparent mode was taken.
 */
static int intel_dp_init_lttpr(struct intel_dp *intel_dp)
{
	int lttpr_count;
	int i;

	if (!intel_dp_read_lttpr_common_caps(intel_dp))
		return 0;

	lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
	/*
	 * Prevent setting LTTPR transparent mode explicitly if no LTTPRs are
	 * detected as this breaks link training at least on the Dell WD19TB
	 * dock.
	 */
	if (lttpr_count == 0)
		return 0;

	/*
	 * See DP Standard v2.0 3.6.6.1. about the explicit disabling of
	 * non-transparent mode and the disable->enable non-transparent mode
	 * sequence.
	 */
	intel_dp_set_lttpr_transparent_mode(intel_dp, true);

	/*
	 * In case of unsupported number of LTTPRs or failing to switch to
	 * non-transparent mode fall-back to transparent link training mode,
	 * still taking into account any LTTPR common lane- rate/count limits.
	 */
	if (lttpr_count < 0)
		return 0;

	if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
		drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
			    "Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n");

		intel_dp_set_lttpr_transparent_mode(intel_dp, true);
		intel_dp_reset_lttpr_count(intel_dp);

		return 0;
	}

	/* Cache the per-PHY caps of every detected repeater. */
	for (i = 0; i < lttpr_count; i++)
		intel_dp_read_lttpr_phy_caps(intel_dp, DP_PHY_LTTPR(i));

	return lttpr_count;
}
178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208

/**
 * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
 * @intel_dp: Intel DP struct
 *
 * Read the LTTPR common and DPRX capabilities and switch to non-transparent
 * link training mode if any is detected and read the PHY capabilities for all
 * detected LTTPRs. In case of an LTTPR detection error or if the number of
 * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
 * transparent mode link training mode.
 *
 * Returns:
 *   >0  if LTTPRs were detected and the non-transparent LT mode was set. The
 *       DPRX capabilities are read out.
 *    0  if no LTTPRs or more than 8 LTTPRs were detected or in case of a
 *       detection failure and the transparent LT mode was set. The DPRX
 *       capabilities are read out.
 *   <0  Reading out the DPRX capabilities failed.
 */
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
{
	int lttpr_count = intel_dp_init_lttpr(intel_dp);

	/* The DPTX shall read the DPRX caps after LTTPR detection. */
	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
		intel_dp_reset_lttpr_common_caps(intel_dp);
		return -EIO;
	}

	return lttpr_count;
}
209
EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps);
210

211
/*
 * Return the highest voltage swing level that may be combined with the
 * given pre-emphasis level: higher pre-emphasis levels allow progressively
 * lower maximum swing levels (3 down to 0).
 */
static u8 dp_voltage_max(u8 preemph)
{
	u8 preemph_level = preemph & DP_TRAIN_PRE_EMPHASIS_MASK;

	if (preemph_level == DP_TRAIN_PRE_EMPH_LEVEL_0)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	if (preemph_level == DP_TRAIN_PRE_EMPH_LEVEL_1)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	if (preemph_level == DP_TRAIN_PRE_EMPH_LEVEL_2)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_1;

	/* DP_TRAIN_PRE_EMPH_LEVEL_3 and anything unexpected */
	return DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
}

226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252
/* Max voltage swing the given LTTPR advertises in its cached PHY caps. */
static u8 intel_dp_lttpr_voltage_max(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);

	return drm_dp_lttpr_voltage_swing_level_3_supported(phy_caps) ?
		DP_TRAIN_VOLTAGE_SWING_LEVEL_3 :
		DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}

/* Max pre-emphasis level the given LTTPR advertises in its cached PHY caps. */
static u8 intel_dp_lttpr_preemph_max(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);

	return drm_dp_lttpr_pre_emphasis_level_3_supported(phy_caps) ?
		DP_TRAIN_PRE_EMPH_LEVEL_3 :
		DP_TRAIN_PRE_EMPH_LEVEL_2;
}

/*
 * Return whether @dp_phy is driven directly by the source: the DPRX when no
 * LTTPRs were detected, otherwise the LTTPR closest to the source (the one
 * with the highest index, trained first).
 */
static bool
intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);

	/* Without any LTTPRs detected, only the DPRX should be queried here. */
	drm_WARN_ON_ONCE(&i915->drm, lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);

	return lttpr_count <= 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
}

/*
 * Return the max voltage swing of the PHY transmitting to @dp_phy: the
 * source's own limit when it drives @dp_phy directly, otherwise the limit
 * of the LTTPR immediately upstream (@dp_phy + 1).
 */
static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
				   const struct intel_crtc_state *crtc_state,
				   enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 voltage_max;

	/*
	 * Get voltage_max from the DPTX_PHY (source or LTTPR) upstream from
	 * the DPRX_PHY we train.
	 */
	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		voltage_max = intel_dp->voltage_max(intel_dp, crtc_state);
	else
		voltage_max = intel_dp_lttpr_voltage_max(intel_dp, dp_phy + 1);

	/* Only levels 2 and 3 are expected as maximums. */
	drm_WARN_ON_ONCE(&i915->drm,
			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);

	return voltage_max;
}

/*
 * Return the max pre-emphasis level of the PHY transmitting to @dp_phy: the
 * source's own limit when it drives @dp_phy directly, otherwise the limit
 * of the LTTPR immediately upstream (@dp_phy + 1).
 */
static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
				   enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 preemph_max;

	/*
	 * Get preemph_max from the DPTX_PHY (source or LTTPR) upstream from
	 * the DPRX_PHY we train.
	 */
	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		preemph_max = intel_dp->preemph_max(intel_dp);
	else
		preemph_max = intel_dp_lttpr_preemph_max(intel_dp, dp_phy + 1);

	/* Only levels 2 and 3 are expected as maximums. */
	drm_WARN_ON_ONCE(&i915->drm,
			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);

	return preemph_max;
}

305 306 307
/*
 * Compute a new drive setting (train_set) from the per-lane voltage swing
 * and pre-emphasis adjustments the sink requested in @link_status, clamped
 * to the limits of the PHY transmitting to @dp_phy.
 */
void
intel_dp_get_adjust_train(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  enum drm_dp_phy dp_phy,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	u8 v = 0;
	u8 p = 0;
	int lane;
	u8 voltage_max;
	u8 preemph_max;

	/* Use the highest level requested on any active lane. */
	for (lane = 0; lane < crtc_state->lane_count; lane++) {
		v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane));
		p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
	}

	preemph_max = intel_dp_phy_preemph_max(intel_dp, dp_phy);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	/* Limit the swing to the max allowed for the chosen pre-emphasis. */
	v = min(v, dp_voltage_max(p));

	voltage_max = intel_dp_phy_voltage_max(intel_dp, crtc_state, dp_phy);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	/* All 4 train_set entries are written, regardless of lane_count. */
	for (lane = 0; lane < 4; lane++)
		intel_dp->train_set[lane] = v | p;
}

336 337 338 339 340 341 342 343
/*
 * Return the DPCD address of the TRAINING_PATTERN_SET register for the
 * given PHY: the DPRX's own register, or the per-repeater one for LTTPRs.
 */
static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp,
					     enum drm_dp_phy dp_phy)
{
	if (dp_phy == DP_PHY_DPRX)
		return DP_TRAINING_PATTERN_SET;

	return DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy);
}

344 345
/*
 * Program @dp_train_pat on the source, then write the pattern together with
 * the per-lane drive settings to the PHY's DPCD registers in a single AUX
 * transfer. Returns true if the full DPCD write succeeded.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			const struct intel_crtc_state *crtc_state,
			enum drm_dp_phy dp_phy,
			u8 dp_train_pat)
{
	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
	u8 buf[sizeof(intel_dp->train_set) + 1];
	int len;

	intel_dp_program_link_training_pattern(intel_dp, crtc_state,
					       dp_train_pat);

	buf[0] = dp_train_pat;
	/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
	memcpy(buf + 1, intel_dp->train_set, crtc_state->lane_count);
	len = crtc_state->lane_count + 1;

	return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len;
}

365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397
/*
 * Map a DP_TRAINING_PATTERN_* value to the character logged as "TPS%c".
 * DP_TRAINING_PATTERN_4 does not follow the '0' + value encoding used by
 * patterns 1-3, hence the separate case.
 */
static char dp_training_pattern_name(u8 train_pat)
{
	switch (train_pat) {
	case DP_TRAINING_PATTERN_1:
	case DP_TRAINING_PATTERN_2:
	case DP_TRAINING_PATTERN_3:
		return '0' + train_pat;
	case DP_TRAINING_PATTERN_4:
		return '4';
	default:
		MISSING_CASE(train_pat);
		return '?';
	}
}

/*
 * Log the training pattern in use (unless it is being disabled) and program
 * it on the source via the platform's set_link_train hook.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       const struct intel_crtc_state *crtc_state,
				       u8 dp_train_pat)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);

	if (train_pat != DP_TRAINING_PATTERN_DISABLE)
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] Using DP training pattern TPS%c\n",
			    encoder->base.base.id, encoder->base.name,
			    dp_training_pattern_name(train_pat));

	intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
}

398
/*
 * Log the current drive settings and program them on the source's output,
 * the latter only if the source directly drives @dp_phy (settings for PHYs
 * behind an LTTPR reach their transmitter via DPCD writes instead, see
 * intel_dp_update_link_train()).
 */
void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state,
				enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];
	char phy_name[10];

	drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s, pre-emphasis level %d%s, at %s\n",
		    train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
		    train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "",
		    (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
		    DP_TRAIN_PRE_EMPHASIS_SHIFT,
		    train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
		    " (max)" : "",
		    intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));

	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		intel_dp->set_signal_levels(intel_dp, crtc_state);
}

419 420
/*
 * Reset the cached drive settings to level 0 for all lanes, program them on
 * the source and start @dp_train_pat training on @dp_phy.
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  enum drm_dp_phy dp_phy,
			  u8 dp_train_pat)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
	return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat);
}

/*
 * Re-program the current drive settings on the source (when it directly
 * drives @dp_phy) and write them to the PHY's DP_TRAINING_LANEx_SET
 * registers. Returns true if all lane_count bytes were written.
 */
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *crtc_state,
			   enum drm_dp_phy dp_phy)
{
	int reg = dp_phy == DP_PHY_DPRX ?
			    DP_TRAINING_LANE0_SET :
			    DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
	int ret;

	intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);

	ret = drm_dp_dpcd_write(&intel_dp->aux, reg,
				intel_dp->train_set, crtc_state->lane_count);

	return ret == crtc_state->lane_count;
}

448 449
static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp,
					     const struct intel_crtc_state *crtc_state)
450 451 452
{
	int lane;

453
	for (lane = 0; lane < crtc_state->lane_count; lane++)
454 455 456 457 458 459 460
		if ((intel_dp->train_set[lane] &
		     DP_TRAIN_MAX_SWING_REACHED) == 0)
			return false;

	return true;
}

461 462 463 464
/*
 * Prepare link training by configuring the link parameters. On DDI platforms
 * also enable the port here.
 */
static bool
intel_dp_prepare_link_train(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 link_config[2];
	u8 link_bw, rate_select;

	/* Optional platform hook, e.g. to enable the port before retraining. */
	if (intel_dp->prepare_link_retrain)
		intel_dp->prepare_link_retrain(intel_dp, crtc_state);

	intel_dp_compute_rate(intel_dp, crtc_state->port_clock,
			      &link_bw, &rate_select);

	/* link_bw == 0 means the rate is selected via DP_LINK_RATE_SET. */
	if (link_bw)
		drm_dbg_kms(&i915->drm,
			    "Using LINK_BW_SET value %02x\n", link_bw);
	else
		drm_dbg_kms(&i915->drm,
			    "Using LINK_RATE_SET value %02x\n", rate_select);

	/* Write the link configuration data */
	link_config[0] = link_bw;
	link_config[1] = crtc_state->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);

	/* eDP 1.4 rate select method. */
	if (!link_bw)
		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
				  &rate_select, 1);

	/* Downspread ctrl + 8b/10b coding; ignore MSA timing params for VRR. */
	link_config[0] = crtc_state->vrr.enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

	intel_dp->DP |= DP_PORT_EN;

	return true;
}

507 508 509 510 511 512 513 514 515 516 517 518 519
/*
 * Apply the clock recovery interval required by the PHY being trained: the
 * DPRX's DPCD-advertised delay, or the fixed delay for LTTPRs.
 */
static void intel_dp_link_training_clock_recovery_delay(struct intel_dp *intel_dp,
							enum drm_dp_phy dp_phy)
{
	if (dp_phy == DP_PHY_DPRX)
		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
	else
		drm_dp_lttpr_link_train_clock_recovery_delay();
}

/*
 * Perform the link training clock recovery phase on the given DP PHY using
 * training pattern 1.
 */
static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
				      const struct intel_crtc_state *crtc_state,
				      enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 voltage;
	int voltage_tries, cr_tries, max_cr_tries;
	bool max_vswing_reached = false;

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		drm_err(&i915->drm, "failed to enable link training\n");
		return false;
	}

	/*
	 * The DP 1.4 spec defines the max clock recovery retries value
	 * as 10 but for pre-DP 1.4 devices we set a very tolerant
	 * retry limit of 80 (4 voltage levels x 4 preemphasis levels x
	 * x 5 identical voltage retries). Since the previous specs didn't
	 * define a limit and created the possibility of an infinite loop
	 * we want to prevent any sync from triggering that corner case.
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
		max_cr_tries = 10;
	else
		max_cr_tries = 80;

	voltage_tries = 1;
	for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
		u8 link_status[DP_LINK_STATUS_SIZE];

		intel_dp_link_training_clock_recovery_delay(intel_dp, dp_phy);

		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
						     link_status) < 0) {
			drm_err(&i915->drm, "failed to get link status\n");
			return false;
		}

		if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) {
			drm_dbg_kms(&i915->drm, "clock recovery OK\n");
			return true;
		}

		/* Give up if the sink kept requesting the same voltage. */
		if (voltage_tries == 5) {
			drm_dbg_kms(&i915->drm,
				    "Same voltage tried 5 times\n");
			return false;
		}

		/* Give up once max swing was reached on a previous iteration. */
		if (max_vswing_reached) {
			drm_dbg_kms(&i915->drm, "Max Voltage Swing reached\n");
			return false;
		}

		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
					  link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
			drm_err(&i915->drm,
				"failed to update link training\n");
			return false;
		}

		/* Track how many times in a row the voltage stayed the same. */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
		    voltage)
			++voltage_tries;
		else
			voltage_tries = 1;

		if (intel_dp_link_max_vswing_reached(intel_dp, crtc_state))
			max_vswing_reached = true;

	}
	drm_err(&i915->drm,
		"Failed clock recovery %d times, giving up!\n", max_cr_tries);
	return false;
}

605
/*
 * Pick training pattern for channel equalization. Training pattern 4 for HBR3
 * or for 1.4 devices that support it, training Pattern 3 for HBR2
 * or 1.2 devices that support it, Training Pattern 2 otherwise.
 */
static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
				     const struct intel_crtc_state *crtc_state,
				     enum drm_dp_phy dp_phy)
{
	bool source_tps3, sink_tps3, source_tps4, sink_tps4;

	/*
	 * Intel platforms that support HBR3 also support TPS4. It is mandatory
	 * for all downstream devices that support HBR3. There are no known eDP
	 * panels that support TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1
	 * specification.
	 * LTTPRs must support TPS4.
	 */
	source_tps4 = intel_dp_source_supports_hbr3(intel_dp);
	sink_tps4 = dp_phy != DP_PHY_DPRX ||
		    drm_dp_tps4_supported(intel_dp->dpcd);
	if (source_tps4 && sink_tps4) {
		return DP_TRAINING_PATTERN_4;
	} else if (crtc_state->port_clock == 810000) {
		if (!source_tps4)
			drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
				    "8.1 Gbps link rate without source HBR3/TPS4 support\n");
		if (!sink_tps4)
			drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
				    "8.1 Gbps link rate without sink TPS4 support\n");
	}
	/*
	 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
	 * also mandatory for downstream devices that support HBR2. However, not
	 * all sinks follow the spec.
	 */
	source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
	sink_tps3 = dp_phy != DP_PHY_DPRX ||
		    drm_dp_tps3_supported(intel_dp->dpcd);
	if (source_tps3 && sink_tps3) {
		return  DP_TRAINING_PATTERN_3;
	} else if (crtc_state->port_clock >= 540000) {
		if (!source_tps3)
			drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
				    ">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n");
		if (!sink_tps3)
			drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
				    ">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
	}

	return DP_TRAINING_PATTERN_2;
}

658 659 660 661 662 663 664 665 666 667 668 669 670
/*
 * Apply the channel equalization interval required by the PHY being
 * trained: the DPRX's DPCD-advertised delay, or the delay from the LTTPR's
 * cached PHY caps.
 */
static void
intel_dp_link_training_channel_equalization_delay(struct intel_dp *intel_dp,
						  enum drm_dp_phy dp_phy)
{
	if (dp_phy == DP_PHY_DPRX) {
		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
	} else {
		const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);

		drm_dp_lttpr_link_train_channel_eq_delay(phy_caps);
	}
}

671
/*
 * Perform the link training channel equalization phase on the given DP PHY
 * using one of training pattern 2, 3 or 4 depending on the source and
 * sink capabilities.
 */
static bool
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
					    const struct intel_crtc_state *crtc_state,
					    enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int tries;
	u32 training_pattern;
	u8 link_status[DP_LINK_STATUS_SIZE];
	bool channel_eq = false;

	training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy);
	/* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
	if (training_pattern != DP_TRAINING_PATTERN_4)
		training_pattern |= DP_LINK_SCRAMBLING_DISABLE;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy,
				     training_pattern)) {
		drm_err(&i915->drm, "failed to start channel equalization\n");
		return false;
	}

	for (tries = 0; tries < 5; tries++) {
		intel_dp_link_training_channel_equalization_delay(intel_dp,
								  dp_phy);
		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
						     link_status) < 0) {
			drm_err(&i915->drm,
				"failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status,
					      crtc_state->lane_count)) {
			intel_dp_dump_link_status(&i915->drm, link_status);
			drm_dbg_kms(&i915->drm,
				    "Clock recovery check failed, cannot "
				    "continue channel equalization\n");
			break;
		}

		if (drm_dp_channel_eq_ok(link_status,
					 crtc_state->lane_count)) {
			channel_eq = true;
			drm_dbg_kms(&i915->drm, "Channel EQ done. DP Training "
				    "successful\n");
			break;
		}

		/* Update training set as requested by target */
		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
					  link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
			drm_err(&i915->drm,
				"failed to update link training\n");
			break;
		}
	}

	/* Try 5 times, else fail and try at lower BW */
	if (tries == 5) {
		intel_dp_dump_link_status(&i915->drm, link_status);
		drm_dbg_kms(&i915->drm,
			    "Channel equalization failed 5 times\n");
	}

	return channel_eq;
}

747 748
static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
						   enum drm_dp_phy dp_phy)
749
{
750
	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
751 752
	u8 val = DP_TRAINING_PATTERN_DISABLE;

753
	return drm_dp_dpcd_write(&intel_dp->aux, reg, &val, 1) == 1;
754 755
}

756 757 758 759 760
/**
 * intel_dp_stop_link_train - stop link training
 * @intel_dp: DP struct
 * @crtc_state: state for CRTC attached to the encoder
 *
 * Stop the link training of the @intel_dp port, disabling the training
 * pattern in the sink's DPCD, and disabling the test pattern symbol
 * generation on the port.
 *
 * What symbols are output on the port after this point is
 * platform specific: On DDI/VLV/CHV platforms it will be the idle pattern
 * with the pipe being disabled, on older platforms it's HW specific if/how an
 * idle pattern is generated, as the pipe is already enabled here for those.
 *
 * This function must be called after intel_dp_start_link_train().
 */
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	intel_dp->link_trained = true;

	/* Disable the pattern in the sink's DPCD first, then on the source. */
	intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
	intel_dp_program_link_training_pattern(intel_dp, crtc_state,
					       DP_TRAINING_PATTERN_DISABLE);
}

782
/*
 * Run the clock recovery and channel equalization phases on @dp_phy and log
 * the overall result. Returns true if both phases passed.
 */
static bool
intel_dp_link_train_phy(struct intel_dp *intel_dp,
			const struct intel_crtc_state *crtc_state,
			enum drm_dp_phy dp_phy)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	char phy_name[10];
	bool ret = false;

	if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy))
		goto out;

	if (!intel_dp_link_training_channel_equalization(intel_dp, crtc_state, dp_phy))
		goto out;

	ret = true;

out:
	drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
		    "[CONNECTOR:%d:%s] Link Training %s at link rate = %d, lane count = %d, at %s\n",
		    intel_connector->base.base.id,
		    intel_connector->base.name,
		    ret ? "passed" : "failed",
		    crtc_state->port_clock, crtc_state->lane_count,
		    intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));

	return ret;
}

/*
 * Handle a failed link training: either record that training failed with
 * HOBL active (so it won't be enabled again), or reduce the link
 * rate/lane-count fallback values; in both cases schedule a hotplug uevent
 * so userspace retries the modeset - unless no further fallback is
 * possible, in which case return without scheduling anything.
 */
static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
						     const struct intel_crtc_state *crtc_state)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	if (intel_dp->hobl_active) {
		drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
			    "Link Training failed with HOBL active, not enabling it from now on");
		intel_dp->hobl_failed = true;
	} else if (intel_dp_get_link_train_fallback_values(intel_dp,
							   crtc_state->port_clock,
							   crtc_state->lane_count)) {
		return;
	}

	/* Schedule a Hotplug Uevent to userspace to start modeset */
	schedule_work(&intel_connector->modeset_retry_work);
}
829

830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859
/* Perform the link training on all LTTPRs and the DPRX on a link. */
static bool
intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
			     const struct intel_crtc_state *crtc_state,
			     int lttpr_count)
{
	bool ret = true;
	int i;

	intel_dp_prepare_link_train(intel_dp, crtc_state);

	/* Train each LTTPR in turn, starting with the one nearest the source. */
	for (i = lttpr_count - 1; i >= 0; i--) {
		enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i);

		ret = intel_dp_link_train_phy(intel_dp, crtc_state, dp_phy);
		intel_dp_disable_dpcd_training_pattern(intel_dp, dp_phy);

		if (!ret)
			break;
	}

	/* Only train the DPRX itself once all LTTPRs trained successfully. */
	if (ret)
		intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);

	if (intel_dp->set_idle_link_train)
		intel_dp->set_idle_link_train(intel_dp, crtc_state);

	return ret;
}

860 861 862 863 864 865 866 867 868 869 870 871 872
/**
 * intel_dp_start_link_train - start link training
 * @intel_dp: DP struct
 * @crtc_state: state for CRTC attached to the encoder
 *
 * Start the link training of the @intel_dp port, scheduling a fallback
 * retraining with reduced link rate/lane parameters if the link training
 * fails.
 * After calling this function intel_dp_stop_link_train() must be called.
 */
void intel_dp_start_link_train(struct intel_dp *intel_dp,
			       const struct intel_crtc_state *crtc_state)
{
	/*
	 * TODO: Reiniting LTTPRs here won't be needed once proper connector
	 * HW state readout is added.
	 */
	int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);

	if (lttpr_count < 0)
		/* Still continue with enabling the port and link training. */
		lttpr_count = 0;

	if (!intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count))
		intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);
}