/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"

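/*
 * The six bytes dumped below are the DPCD link status block (DPCD 0x202
 * through 0x207): LANE0_1_STATUS, LANE2_3_STATUS, LANE_ALIGN_STATUS_UPDATED,
 * SINK_STATUS, ADJUST_REQUEST_LANE0_1 and ADJUST_REQUEST_LANE2_3.
 */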
static void
intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
{
	DRM_DEBUG_KMS("ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x",
		      link_status[0], link_status[1], link_status[2],
		      link_status[3], link_status[4], link_status[5]);
}

static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
{
	intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
				    DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = 0;
}

static const char *intel_dp_phy_name(enum drm_dp_phy dp_phy,
				     char *buf, size_t buf_size)
{
	if (dp_phy == DP_PHY_DPRX)
		snprintf(buf, buf_size, "DPRX");
	else
		snprintf(buf, buf_size, "LTTPR %d", dp_phy - DP_PHY_LTTPR1 + 1);

	return buf;
}

static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp,
				   enum drm_dp_phy dp_phy)
{
	return intel_dp->lttpr_phy_caps[dp_phy - DP_PHY_LTTPR1];
}

static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
					 enum drm_dp_phy dp_phy)
{
	u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
	char phy_name[10];

	intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));

	if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dp_phy, phy_caps) < 0) {
		drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
			    "failed to read the PHY caps for %s\n",
			    phy_name);
		return;
	}

	drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
		    "%s PHY capabilities: %*ph\n",
		    phy_name,
		    (int)sizeof(intel_dp->lttpr_phy_caps[0]),
		    phy_caps);
}

static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp)
{
	if (drm_dp_read_lttpr_common_caps(&intel_dp->aux,
					  intel_dp->lttpr_common_caps) < 0) {
		memset(intel_dp->lttpr_common_caps, 0,
		       sizeof(intel_dp->lttpr_common_caps));
		return false;
	}

	drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
		    "LTTPR common capabilities: %*ph\n",
		    (int)sizeof(intel_dp->lttpr_common_caps),
		    intel_dp->lttpr_common_caps);

	return true;
}

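/*
 * In transparent mode the LTTPRs simply forward AUX and main link traffic
 * and the link is trained end to end as if no repeaters were present. In
 * non-transparent mode each LTTPR exposes its own DPCD register set and
 * each link segment is trained separately. The mode is selected via the
 * DP_PHY_REPEATER_MODE DPCD register (0xf0003).
 */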
static bool
intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
{
	u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT :
			  DP_PHY_REPEATER_MODE_NON_TRANSPARENT;

	return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1;
}

/**
 * intel_dp_lttpr_init - detect LTTPRs and init the LTTPR link training mode
 * @intel_dp: Intel DP struct
 *
 * Read the LTTPR common capabilities, switch to non-transparent link training
 * mode if any is detected and read the PHY capabilities for all detected
 * LTTPRs. In case of an LTTPR detection error or if the number of
 * LTTPRs. In case of an LTTPR detection error or if the number of
 * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
 * transparent link training mode.
 *
 * Returns:
 *   >0  if LTTPRs were detected and the non-transparent LT mode was set
 *    0  if no LTTPRs or more than 8 LTTPRs were detected or in case of a
 *       detection failure and the transparent LT mode was set
 */
int intel_dp_lttpr_init(struct intel_dp *intel_dp)
{
	int lttpr_count;
	bool ret;
	int i;

	if (intel_dp_is_edp(intel_dp))
		return 0;

	ret = intel_dp_read_lttpr_common_caps(intel_dp);
	if (!ret)
		return 0;

	lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
	/*
	 * Prevent setting LTTPR transparent mode explicitly if no LTTPRs are
	 * detected as this breaks link training at least on the Dell WD19TB
	 * dock.
	 */
	if (lttpr_count == 0)
		return 0;

	/*
	 * See DP Standard v2.0 3.6.6.1. about the explicit disabling of
	 * non-transparent mode and the disable->enable non-transparent mode
	 * sequence.
	 */
	intel_dp_set_lttpr_transparent_mode(intel_dp, true);

	/*
	 * In case of an unsupported number of LTTPRs or a failure to switch to
	 * non-transparent mode, fall back to the transparent link training
	 * mode, still taking into account any LTTPR common lane count/rate
	 * limits.
	 */
	if (lttpr_count < 0)
		return 0;

	if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
		drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
			    "Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n");

		intel_dp_set_lttpr_transparent_mode(intel_dp, true);
		intel_dp_reset_lttpr_count(intel_dp);

		return 0;
	}

	for (i = 0; i < lttpr_count; i++)
		intel_dp_read_lttpr_phy_caps(intel_dp, DP_PHY_LTTPR(i));

	return lttpr_count;
}
EXPORT_SYMBOL(intel_dp_lttpr_init);

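/*
 * dp_voltage_max() below caps the voltage swing level against the requested
 * pre-emphasis level: the two level numbers may sum to at most 3, so e.g. a
 * pre-emphasis request of level 2 limits the voltage swing to level 1.
 */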
static u8 dp_voltage_max(u8 preemph)
{
	switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
	default:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
	}
}

static u8 intel_dp_lttpr_voltage_max(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);

	if (drm_dp_lttpr_voltage_swing_level_3_supported(phy_caps))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}

static u8 intel_dp_lttpr_preemph_max(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);

	if (drm_dp_lttpr_pre_emphasis_level_3_supported(phy_caps))
		return DP_TRAIN_PRE_EMPH_LEVEL_3;
	else
		return DP_TRAIN_PRE_EMPH_LEVEL_2;
}

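/*
 * In the drm_dp_phy enumeration dp_phy + 1 is the PHY immediately upstream
 * (closer to the source) of dp_phy, so the last detected LTTPR,
 * DP_PHY_LTTPR(lttpr_count - 1), is the one driven directly by the source.
 * With no LTTPRs detected the DPRX itself is directly downstream of the
 * source.
 */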
static bool
intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);

	drm_WARN_ON_ONCE(&i915->drm, lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);

	return lttpr_count <= 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
}

static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
				   const struct intel_crtc_state *crtc_state,
				   enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 voltage_max;

	/*
	 * Get voltage_max from the DPTX_PHY (source or LTTPR) upstream from
	 * the DPRX_PHY we train.
	 */
	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		voltage_max = intel_dp->voltage_max(intel_dp, crtc_state);
	else
		voltage_max = intel_dp_lttpr_voltage_max(intel_dp, dp_phy + 1);

	drm_WARN_ON_ONCE(&i915->drm,
			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);

	return voltage_max;
}

static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
				   enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 preemph_max;

	/*
	 * Get preemph_max from the DPTX_PHY (source or LTTPR) upstream from
	 * the DPRX_PHY we train.
	 */
	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		preemph_max = intel_dp->preemph_max(intel_dp);
	else
		preemph_max = intel_dp_lttpr_preemph_max(intel_dp, dp_phy + 1);

	drm_WARN_ON_ONCE(&i915->drm,
			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);

	return preemph_max;
}

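/*
 * Worked example for intel_dp_get_adjust_train() below, assuming the usual
 * level-2/level-3 limits: if the highest per-lane request in link_status is
 * vswing level 3 with pre-emphasis level 2 and preemph_max is level 3, then
 * p stays at level 2, dp_voltage_max() caps v at level 1 and, with
 * voltage_max at level 3, no max-swing flag is set, yielding train_set[] =
 * DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2 for all lanes.
 */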
void
intel_dp_get_adjust_train(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  enum drm_dp_phy dp_phy,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	u8 v = 0;
	u8 p = 0;
	int lane;
	u8 voltage_max;
	u8 preemph_max;

	for (lane = 0; lane < crtc_state->lane_count; lane++) {
		v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane));
		p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
	}

	preemph_max = intel_dp_phy_preemph_max(intel_dp, dp_phy);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	v = min(v, dp_voltage_max(p));

	voltage_max = intel_dp_phy_voltage_max(intel_dp, crtc_state, dp_phy);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	for (lane = 0; lane < 4; lane++)
		intel_dp->train_set[lane] = v | p;
}

static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp,
					     enum drm_dp_phy dp_phy)
{
	return dp_phy == DP_PHY_DPRX ?
		DP_TRAINING_PATTERN_SET :
		DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy);
}

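/*
 * DP_TRAINING_PATTERN_SET (DPCD 0x102) and the per-lane
 * DP_TRAINING_LANEx_SET registers (0x103-0x106) are contiguous, which is
 * what lets intel_dp_set_link_train() below program the pattern and the
 * initial drive settings with a single AUX write; the LTTPR mirror
 * registers follow the same layout.
 */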
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			const struct intel_crtc_state *crtc_state,
			enum drm_dp_phy dp_phy,
			u8 dp_train_pat)
{
	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
	u8 buf[sizeof(intel_dp->train_set) + 1];
	int len;

	intel_dp_program_link_training_pattern(intel_dp, crtc_state,
					       dp_train_pat);

	buf[0] = dp_train_pat;
	/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
	memcpy(buf + 1, intel_dp->train_set, crtc_state->lane_count);
	len = crtc_state->lane_count + 1;

	return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len;
}

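/*
 * Note that the source PHY's signal levels are only reprogrammed below when
 * the PHY being trained is the one directly downstream of the source; when
 * training a PHY further downstream, the upstream LTTPR applies the drive
 * settings itself, based on the DPCD lane-set writes.
 */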
void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state,
				enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];
	char phy_name[10];

	drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s, pre-emphasis level %d%s, at %s\n",
		    train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
		    train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "",
		    (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
		    DP_TRAIN_PRE_EMPHASIS_SHIFT,
		    train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
		    " (max)" : "",
		    intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));

	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		intel_dp->set_signal_levels(intel_dp, crtc_state);
}

static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  enum drm_dp_phy dp_phy,
			  u8 dp_train_pat)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
	return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat);
}

static bool
intel_dp_update_link_train(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *crtc_state,
			   enum drm_dp_phy dp_phy)
{
	int reg = dp_phy == DP_PHY_DPRX ?
			    DP_TRAINING_LANE0_SET :
			    DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
	int ret;

	intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);

	ret = drm_dp_dpcd_write(&intel_dp->aux, reg,
				intel_dp->train_set, crtc_state->lane_count);

	return ret == crtc_state->lane_count;
}

static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp,
					     const struct intel_crtc_state *crtc_state)
{
	int lane;

	for (lane = 0; lane < crtc_state->lane_count; lane++)
		if ((intel_dp->train_set[lane] &
		     DP_TRAIN_MAX_SWING_REACHED) == 0)
			return false;

	return true;
}

/*
 * Prepare link training by configuring the link parameters. On DDI platforms
 * also enable the port here.
 */
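/*
 * The two 2-byte AUX writes below cover the contiguous register pairs
 * DP_LINK_BW_SET/DP_LANE_COUNT_SET (DPCD 0x100/0x101) and
 * DP_DOWNSPREAD_CTRL/DP_MAIN_LINK_CHANNEL_CODING_SET (0x107/0x108, the
 * latter receiving DP_SET_ANSI_8B10B).
 */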
static bool
intel_dp_prepare_link_train(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 link_config[2];
	u8 link_bw, rate_select;

	if (intel_dp->prepare_link_retrain)
		intel_dp->prepare_link_retrain(intel_dp, crtc_state);

	intel_dp_compute_rate(intel_dp, crtc_state->port_clock,
			      &link_bw, &rate_select);

	if (link_bw)
		drm_dbg_kms(&i915->drm,
			    "Using LINK_BW_SET value %02x\n", link_bw);
	else
		drm_dbg_kms(&i915->drm,
			    "Using LINK_RATE_SET value %02x\n", rate_select);

	/* Write the link configuration data */
	link_config[0] = link_bw;
	link_config[1] = crtc_state->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);

	/* eDP 1.4 rate select method. */
	if (!link_bw)
		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
				  &rate_select, 1);

	link_config[0] = crtc_state->vrr.enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

	intel_dp->DP |= DP_PORT_EN;

	return true;
}

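/*
 * For the DPRX the clock recovery wait is derived from the
 * DP_TRAINING_AUX_RD_INTERVAL DPCD field, while for LTTPRs the LTTPR spec
 * mandates a fixed 100 us wait (see the drm helpers called below).
 */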
static void intel_dp_link_training_clock_recovery_delay(struct intel_dp *intel_dp,
							enum drm_dp_phy dp_phy)
{
	if (dp_phy == DP_PHY_DPRX)
		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
	else
		drm_dp_lttpr_link_train_clock_recovery_delay();
}

/*
 * Perform the link training clock recovery phase on the given DP PHY using
 * training pattern 1.
 */
static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
				      const struct intel_crtc_state *crtc_state,
				      enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 voltage;
	int voltage_tries, cr_tries, max_cr_tries;
	bool max_vswing_reached = false;

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		drm_err(&i915->drm, "failed to enable link training\n");
		return false;
	}

	/*
	 * The DP 1.4 spec defines the max clock recovery retries value
	 * as 10 but for pre-DP 1.4 devices we set a very tolerant
	 * retry limit of 80 (4 voltage levels x 4 preemphasis levels x
	 * 5 identical voltage retries). Since the previous specs didn't
	 * define a limit and created the possibility of an infinite loop
	 * we want to prevent any sink from triggering that corner case.
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
		max_cr_tries = 10;
	else
		max_cr_tries = 80;

	voltage_tries = 1;
	for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
		u8 link_status[DP_LINK_STATUS_SIZE];

		intel_dp_link_training_clock_recovery_delay(intel_dp, dp_phy);

		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
						     link_status) < 0) {
			drm_err(&i915->drm, "failed to get link status\n");
			return false;
		}

		if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) {
			drm_dbg_kms(&i915->drm, "clock recovery OK\n");
			return true;
		}

		if (voltage_tries == 5) {
			drm_dbg_kms(&i915->drm,
				    "Same voltage tried 5 times\n");
			return false;
		}

		if (max_vswing_reached) {
			drm_dbg_kms(&i915->drm, "Max Voltage Swing reached\n");
			return false;
		}

		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
					  link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
			drm_err(&i915->drm,
				"failed to update link training\n");
			return false;
		}

		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
		    voltage)
			++voltage_tries;
		else
			voltage_tries = 1;

		if (intel_dp_link_max_vswing_reached(intel_dp, crtc_state))
			max_vswing_reached = true;
	}

	drm_err(&i915->drm,
		"Failed clock recovery %d times, giving up!\n", max_cr_tries);
	return false;
}

/*
 * Pick the training pattern for channel equalization. Training pattern 4
 * for HBR3 or for DP 1.4 devices that support it, training pattern 3 for
 * HBR2 or DP 1.2 devices that support it, training pattern 2 otherwise.
 */
static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
				     const struct intel_crtc_state *crtc_state,
				     enum drm_dp_phy dp_phy)
{
	bool source_tps3, sink_tps3, source_tps4, sink_tps4;

	/*
	 * Intel platforms that support HBR3 also support TPS4. It is mandatory
	 * for all downstream devices that support HBR3. There are no known eDP
	 * panels that support TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1
	 * specification.
	 * LTTPRs must support TPS4.
	 */
	source_tps4 = intel_dp_source_supports_hbr3(intel_dp);
	sink_tps4 = dp_phy != DP_PHY_DPRX ||
		    drm_dp_tps4_supported(intel_dp->dpcd);
	if (source_tps4 && sink_tps4) {
		return DP_TRAINING_PATTERN_4;
	} else if (crtc_state->port_clock == 810000) {
		if (!source_tps4)
			drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
				    "8.1 Gbps link rate without source HBR3/TPS4 support\n");
		if (!sink_tps4)
			drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
				    "8.1 Gbps link rate without sink TPS4 support\n");
	}
	/*
	 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
	 * also mandatory for downstream devices that support HBR2. However, not
	 * all sinks follow the spec.
	 */
	source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
	sink_tps3 = dp_phy != DP_PHY_DPRX ||
		    drm_dp_tps3_supported(intel_dp->dpcd);
	if (source_tps3 && sink_tps3) {
		return DP_TRAINING_PATTERN_3;
	} else if (crtc_state->port_clock >= 540000) {
		if (!source_tps3)
			drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
				    ">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n");
		if (!sink_tps3)
			drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
				    ">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
	}

	return DP_TRAINING_PATTERN_2;
}

static void
intel_dp_link_training_channel_equalization_delay(struct intel_dp *intel_dp,
						  enum drm_dp_phy dp_phy)
{
	if (dp_phy == DP_PHY_DPRX) {
		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
	} else {
		const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);

		drm_dp_lttpr_link_train_channel_eq_delay(phy_caps);
	}
}

/*
 * Perform the link training channel equalization phase on the given DP PHY
 * using one of training pattern 2, 3 or 4 depending on the source and
 * sink capabilities.
 */
static bool
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
					    const struct intel_crtc_state *crtc_state,
					    enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int tries;
	u32 training_pattern;
	u8 link_status[DP_LINK_STATUS_SIZE];
	bool channel_eq = false;

	training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy);
	/* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
	if (training_pattern != DP_TRAINING_PATTERN_4)
		training_pattern |= DP_LINK_SCRAMBLING_DISABLE;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy,
				     training_pattern)) {
		drm_err(&i915->drm, "failed to start channel equalization\n");
		return false;
	}

	for (tries = 0; tries < 5; tries++) {
		intel_dp_link_training_channel_equalization_delay(intel_dp,
								  dp_phy);
		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
						     link_status) < 0) {
			drm_err(&i915->drm,
				"failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status,
					      crtc_state->lane_count)) {
			intel_dp_dump_link_status(link_status);
			drm_dbg_kms(&i915->drm,
				    "Clock recovery check failed, cannot "
				    "continue channel equalization\n");
			break;
		}

		if (drm_dp_channel_eq_ok(link_status,
					 crtc_state->lane_count)) {
			channel_eq = true;
			drm_dbg_kms(&i915->drm, "Channel EQ done. DP Training "
				    "successful\n");
			break;
		}

		/* Update training set as requested by target */
		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
					  link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
			drm_err(&i915->drm,
				"failed to update link training\n");
			break;
		}
	}

	/* Try 5 times, else fail and try at lower BW */
	if (tries == 5) {
		intel_dp_dump_link_status(link_status);
		drm_dbg_kms(&i915->drm,
			    "Channel equalization failed 5 times\n");
	}

	return channel_eq;
}

static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
						   enum drm_dp_phy dp_phy)
{
	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
	u8 val = DP_TRAINING_PATTERN_DISABLE;

	return drm_dp_dpcd_write(&intel_dp->aux, reg, &val, 1) == 1;
}

/**
 * intel_dp_stop_link_train - stop link training
 * @intel_dp: DP struct
 * @crtc_state: state for CRTC attached to the encoder
 *
 * Stop the link training of the @intel_dp port, disabling the training
 * pattern in the sink's DPCD, and disabling the test pattern symbol
 * generation on the port.
 *
 * What symbols are output on the port after this point is
 * platform specific: On DDI/VLV/CHV platforms it will be the idle pattern
 * with the pipe being disabled, on older platforms it's HW specific if/how an
 * idle pattern is generated, as the pipe is already enabled here for those.
 *
 * This function must be called after intel_dp_start_link_train().
 */
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	intel_dp->link_trained = true;

	intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
	intel_dp_program_link_training_pattern(intel_dp, crtc_state,
					       DP_TRAINING_PATTERN_DISABLE);
}

static bool
intel_dp_link_train_phy(struct intel_dp *intel_dp,
			const struct intel_crtc_state *crtc_state,
			enum drm_dp_phy dp_phy)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	char phy_name[10];
	bool ret = false;

	if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy))
		goto out;

	if (!intel_dp_link_training_channel_equalization(intel_dp, crtc_state, dp_phy))
		goto out;

	ret = true;

out:
	drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
		    "[CONNECTOR:%d:%s] Link Training %s at link rate = %d, lane count = %d, at %s",
		    intel_connector->base.base.id,
		    intel_connector->base.name,
		    ret ? "passed" : "failed",
		    crtc_state->port_clock, crtc_state->lane_count,
		    intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));

	return ret;
}

static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
						     const struct intel_crtc_state *crtc_state)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	if (intel_dp->hobl_active) {
		drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
			    "Link Training failed with HOBL active, not enabling it from now on");
		intel_dp->hobl_failed = true;
	} else if (intel_dp_get_link_train_fallback_values(intel_dp,
							   crtc_state->port_clock,
							   crtc_state->lane_count)) {
		return;
	}

	/* Schedule a Hotplug Uevent to userspace to start modeset */
	schedule_work(&intel_connector->modeset_retry_work);
}

/* Perform the link training on all LTTPRs and the DPRX on a link. */
static bool
intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
			     const struct intel_crtc_state *crtc_state,
			     int lttpr_count)
{
	bool ret = true;
	int i;

	intel_dp_prepare_link_train(intel_dp, crtc_state);

	for (i = lttpr_count - 1; i >= 0; i--) {
		enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i);

		ret = intel_dp_link_train_phy(intel_dp, crtc_state, dp_phy);
		intel_dp_disable_dpcd_training_pattern(intel_dp, dp_phy);

		if (!ret)
			break;
	}

	if (ret)
		intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);

	if (intel_dp->set_idle_link_train)
		intel_dp->set_idle_link_train(intel_dp, crtc_state);

	return ret;
}
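
/*
 * Illustrative calling sequence (a sketch only; the actual callers are the
 * platform encoder enable hooks, e.g. in the DDI code):
 *
 *	intel_dp_start_link_train(intel_dp, crtc_state);
 *	...enable the pipe and start sending video...
 *	intel_dp_stop_link_train(intel_dp, crtc_state);
 */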

/**
 * intel_dp_start_link_train - start link training
 * @intel_dp: DP struct
 * @crtc_state: state for CRTC attached to the encoder
 *
 * Start the link training of the @intel_dp port, scheduling a fallback
 * retraining with reduced link rate/lane parameters if the link training
 * fails.
 * After calling this function intel_dp_stop_link_train() must be called.
 */
void intel_dp_start_link_train(struct intel_dp *intel_dp,
			       const struct intel_crtc_state *crtc_state)
{
	/*
	 * TODO: Reiniting LTTPRs here won't be needed once proper connector
	 * HW state readout is added.
	 */
	int lttpr_count = intel_dp_lttpr_init(intel_dp);

	if (!intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count))
		intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);
}