/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
/*
 * Dump the first 6 bytes of the DPCD link status (per-lane status, lane
 * align, sink status and adjust requests) for debugging.
 */
static void
intel_dp_dump_link_status(struct drm_device *drm,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	drm_dbg_kms(drm,
		    "ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
		    link_status[0], link_status[1], link_status[2],
		    link_status[3], link_status[4], link_status[5]);
}

/* Clear the cached LTTPR common capabilities. */
static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
{
	memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps));
}

/* Zero the cached PHY repeater count field of the LTTPR common caps. */
static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
{
	intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
				    DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = 0;
}

/*
 * Format a human readable name for @dp_phy ("DPRX" or "LTTPR <n>") into
 * @buf and return @buf for convenient use as a printf argument.
 */
static const char *intel_dp_phy_name(enum drm_dp_phy dp_phy,
				     char *buf, size_t buf_size)
{
	if (dp_phy != DP_PHY_DPRX)
		snprintf(buf, buf_size, "LTTPR %d", dp_phy - DP_PHY_LTTPR1 + 1);
	else
		snprintf(buf, buf_size, "DPRX");

	return buf;
}

/* Return the cached capability buffer for the given LTTPR PHY. */
static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp,
				   enum drm_dp_phy dp_phy)
{
	return intel_dp->lttpr_phy_caps[dp_phy - DP_PHY_LTTPR1];
}

/*
 * Read the per-PHY capabilities of the given LTTPR into the cached
 * buffer, logging the caps on success and a debug message on failure.
 */
static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
					 enum drm_dp_phy dp_phy)
{
	u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
	char phy_name[10];

	intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));

	if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dp_phy, phy_caps) < 0) {
		drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
			    "failed to read the PHY caps for %s\n",
			    phy_name);
		return;
	}

	drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
		    "%s PHY capabilities: %*ph\n",
		    phy_name,
		    (int)sizeof(intel_dp->lttpr_phy_caps[0]),
		    phy_caps);
}

/*
 * Read and cache the LTTPR common capabilities.
 *
 * Returns true if the caps were read successfully and report at least the
 * minimum (1.4) LTTPR field data structure revision, false otherwise with
 * the cached caps cleared.
 */
static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (intel_dp_is_edp(intel_dp))
		return false;

	/*
	 * Detecting LTTPRs must be avoided on platforms with an AUX timeout
	 * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
	 */
	if (DISPLAY_VER(i915) < 10 || IS_GEMINILAKE(i915))
		return false;

	if (drm_dp_read_lttpr_common_caps(&intel_dp->aux,
					  intel_dp->lttpr_common_caps) < 0)
		goto reset_caps;

	drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
		    "LTTPR common capabilities: %*ph\n",
		    (int)sizeof(intel_dp->lttpr_common_caps),
		    intel_dp->lttpr_common_caps);

	/* The minimum value of LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV is 1.4 */
	if (intel_dp->lttpr_common_caps[0] < 0x14)
		goto reset_caps;

	return true;

reset_caps:
	intel_dp_reset_lttpr_common_caps(intel_dp);
	return false;
}

/*
 * Select transparent or non-transparent LTTPR link training mode via the
 * DP_PHY_REPEATER_MODE DPCD register. Returns true if the write succeeded.
 */
static bool
intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
{
	u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT :
			  DP_PHY_REPEATER_MODE_NON_TRANSPARENT;

	return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1;
}

/*
 * Detect LTTPRs, switch them to non-transparent link training mode when
 * possible and read the per-PHY capabilities of each detected LTTPR.
 *
 * Returns the number of LTTPRs trained in non-transparent mode, or 0 when
 * no LTTPRs were detected / transparent mode will be used.
 */
static int intel_dp_init_lttpr(struct intel_dp *intel_dp)
{
	int lttpr_count;
	int i;

	if (!intel_dp_read_lttpr_common_caps(intel_dp))
		return 0;

	lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
	/*
	 * Prevent setting LTTPR transparent mode explicitly if no LTTPRs are
	 * detected as this breaks link training at least on the Dell WD19TB
	 * dock.
	 */
	if (lttpr_count == 0)
		return 0;

	/*
	 * See DP Standard v2.0 3.6.6.1. about the explicit disabling of
	 * non-transparent mode and the disable->enable non-transparent mode
	 * sequence.
	 */
	intel_dp_set_lttpr_transparent_mode(intel_dp, true);

	/*
	 * In case of unsupported number of LTTPRs or failing to switch to
	 * non-transparent mode fall-back to transparent link training mode,
	 * still taking into account any LTTPR common lane- rate/count limits.
	 */
	if (lttpr_count < 0)
		return 0;

	if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
		drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
			    "Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n");

		intel_dp_set_lttpr_transparent_mode(intel_dp, true);
		intel_dp_reset_lttpr_count(intel_dp);

		return 0;
	}

	for (i = 0; i < lttpr_count; i++)
		intel_dp_read_lttpr_phy_caps(intel_dp, DP_PHY_LTTPR(i));

	return lttpr_count;
}
/**
 * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
 * @intel_dp: Intel DP struct
 *
 * Read the LTTPR common and DPRX capabilities and switch to non-transparent
 * link training mode if any is detected and read the PHY capabilities for all
 * detected LTTPRs. In case of an LTTPR detection error or if the number of
 * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
 * transparent mode link training mode.
 *
 * Returns:
 *   >0  if LTTPRs were detected and the non-transparent LT mode was set. The
 *       DPRX capabilities are read out.
 *    0  if no LTTPRs or more than 8 LTTPRs were detected or in case of a
 *       detection failure and the transparent LT mode was set. The DPRX
 *       capabilities are read out.
 *   <0  Reading out the DPRX capabilities failed.
 */
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
{
	int lttpr_count = intel_dp_init_lttpr(intel_dp);

	/* The DPTX shall read the DPRX caps after LTTPR detection. */
	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
		intel_dp_reset_lttpr_common_caps(intel_dp);
		return -EIO;
	}

	return lttpr_count;
}
/*
 * Return the maximum voltage swing level usable with the given
 * pre-emphasis level (the two levels together never exceed 3).
 */
static u8 dp_voltage_max(u8 preemph)
{
	switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
	default:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
	}
}

225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251
static u8 intel_dp_lttpr_voltage_max(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);

	if (drm_dp_lttpr_voltage_swing_level_3_supported(phy_caps))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}

/*
 * Return the maximum pre-emphasis level supported by the given LTTPR,
 * based on its cached PHY capabilities.
 */
static u8 intel_dp_lttpr_preemph_max(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);

	return drm_dp_lttpr_pre_emphasis_level_3_supported(phy_caps) ?
		DP_TRAIN_PRE_EMPH_LEVEL_3 :
		DP_TRAIN_PRE_EMPH_LEVEL_2;
}

/*
 * Return whether @dp_phy is driven directly by the source PHY, i.e. there
 * is no LTTPR between the source and @dp_phy.
 */
static bool
intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);

	/* Without any LTTPRs only the DPRX itself may be trained. */
	drm_WARN_ON_ONCE(&i915->drm, lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);

	return lttpr_count <= 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
}

/*
 * Return the maximum voltage swing of the transmitter (source or LTTPR)
 * driving the link segment that @dp_phy is trained on.
 */
static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
				   const struct intel_crtc_state *crtc_state,
				   enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 voltage_max;

	/*
	 * Get voltage_max from the DPTX_PHY (source or LTTPR) upstream from
	 * the DPRX_PHY we train.
	 */
	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		voltage_max = intel_dp->voltage_max(intel_dp, crtc_state);
	else
		voltage_max = intel_dp_lttpr_voltage_max(intel_dp, dp_phy + 1);

	/* Only levels 2 and 3 are valid maximums. */
	drm_WARN_ON_ONCE(&i915->drm,
			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);

	return voltage_max;
}

/*
 * Return the maximum pre-emphasis level of the transmitter (source or
 * LTTPR) driving the link segment that @dp_phy is trained on.
 */
static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
				   enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 preemph_max;

	/*
	 * Get preemph_max from the DPTX_PHY (source or LTTPR) upstream from
	 * the DPRX_PHY we train.
	 */
	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		preemph_max = intel_dp->preemph_max(intel_dp);
	else
		preemph_max = intel_dp_lttpr_preemph_max(intel_dp, dp_phy + 1);

	/* Only levels 2 and 3 are valid maximums. */
	drm_WARN_ON_ONCE(&i915->drm,
			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);

	return preemph_max;
}

/*
 * Whether signal levels can be adjusted per lane for @dp_phy: true when
 * the PHY is driven by an LTTPR rather than by the source itself.
 */
static bool has_per_lane_signal_levels(struct intel_dp *intel_dp,
				       enum drm_dp_phy dp_phy)
{
	return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy);
}

/*
 * Compute the DP_TRAINING_LANEx_SET value for @lane from the sink's
 * adjust request in @link_status, clamped to the limits of the PHY
 * driving @dp_phy and flagging max levels reached.
 */
static u8 intel_dp_get_lane_adjust_train(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *crtc_state,
					 enum drm_dp_phy dp_phy,
					 const u8 link_status[DP_LINK_STATUS_SIZE],
					 int lane)
{
	u8 v = 0;
	u8 p = 0;
	u8 voltage_max;
	u8 preemph_max;

	if (has_per_lane_signal_levels(intel_dp, dp_phy)) {
		lane = min(lane, crtc_state->lane_count - 1);

		v = drm_dp_get_adjust_request_voltage(link_status, lane);
		p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
	} else {
		/* No per-lane control: use the maximum request over all lanes. */
		for (lane = 0; lane < crtc_state->lane_count; lane++) {
			v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane));
			p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
		}
	}

	preemph_max = intel_dp_phy_preemph_max(intel_dp, dp_phy);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	/* The chosen pre-emphasis level limits the usable voltage swing. */
	v = min(v, dp_voltage_max(p));

	voltage_max = intel_dp_phy_voltage_max(intel_dp, crtc_state, dp_phy);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	return v | p;
}

/*
 * Recompute intel_dp->train_set for all 4 lanes from the sink's adjust
 * requests in @link_status, for the given @dp_phy.
 */
void
intel_dp_get_adjust_train(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  enum drm_dp_phy dp_phy,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	int lane;

	for (lane = 0; lane < 4; lane++)
		intel_dp->train_set[lane] =
			intel_dp_get_lane_adjust_train(intel_dp, crtc_state,
						       dp_phy, link_status, lane);
}

360 361 362 363 364 365 366 367
static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp,
					     enum drm_dp_phy dp_phy)
{
	return dp_phy == DP_PHY_DPRX ?
		DP_TRAINING_PATTERN_SET :
		DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy);
}

/*
 * Program @dp_train_pat on the source and write the pattern together
 * with the current train_set levels to the DPCD of @dp_phy in a single
 * AUX write. Returns true if the whole DPCD write succeeded.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			const struct intel_crtc_state *crtc_state,
			enum drm_dp_phy dp_phy,
			u8 dp_train_pat)
{
	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
	u8 buf[sizeof(intel_dp->train_set) + 1];
	int len;

	intel_dp_program_link_training_pattern(intel_dp, crtc_state,
					       dp_phy, dp_train_pat);

	buf[0] = dp_train_pat;
	/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
	memcpy(buf + 1, intel_dp->train_set, crtc_state->lane_count);
	len = crtc_state->lane_count + 1;

	return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len;
}

/*
 * Map a DP_TRAINING_PATTERN_* value to the single character used in the
 * "TPS%c" debug output ('?' for unknown patterns).
 */
static char dp_training_pattern_name(u8 train_pat)
{
	switch (train_pat) {
	case DP_TRAINING_PATTERN_1:
	case DP_TRAINING_PATTERN_2:
	case DP_TRAINING_PATTERN_3:
		return '0' + train_pat;
	case DP_TRAINING_PATTERN_4:
		return '4';
	default:
		MISSING_CASE(train_pat);
		return '?';
	}
}

/*
 * Program the given training pattern on the source side via the platform
 * hook, logging the pattern unless it is the disable pattern.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       const struct intel_crtc_state *crtc_state,
				       enum drm_dp_phy dp_phy,
				       u8 dp_train_pat)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);
	char phy_name[10];

	if (train_pat != DP_TRAINING_PATTERN_DISABLE)
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] Using DP training pattern TPS%c, at %s\n",
			    encoder->base.base.id, encoder->base.name,
			    dp_training_pattern_name(train_pat),
			    intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));

	intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
}

/*
 * Format helpers to dump the 4-lane train_set voltage swing /
 * pre-emphasis levels, marking levels that reached their maximum.
 */
#define TRAIN_SET_FMT "%d%s/%d%s/%d%s/%d%s"
#define _TRAIN_SET_VSWING_ARGS(train_set) \
	((train_set) & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT, \
	(train_set) & DP_TRAIN_MAX_SWING_REACHED ? "(max)" : ""
#define TRAIN_SET_VSWING_ARGS(train_set) \
	_TRAIN_SET_VSWING_ARGS((train_set)[0]), \
	_TRAIN_SET_VSWING_ARGS((train_set)[1]), \
	_TRAIN_SET_VSWING_ARGS((train_set)[2]), \
	_TRAIN_SET_VSWING_ARGS((train_set)[3])
#define _TRAIN_SET_PREEMPH_ARGS(train_set) \
	((train_set) & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT, \
	(train_set) & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? "(max)" : ""
#define TRAIN_SET_PREEMPH_ARGS(train_set) \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[0]), \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[1]), \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[2]), \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[3])

/*
 * Log the current train_set levels and apply them on the source, but
 * only when the source PHY itself drives @dp_phy.
 */
void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state,
				enum drm_dp_phy dp_phy)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	char phy_name[10];

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] lanes: %d, "
		    "vswing levels: " TRAIN_SET_FMT ", "
		    "pre-emphasis levels: " TRAIN_SET_FMT ", at %s\n",
		    encoder->base.base.id, encoder->base.name,
		    crtc_state->lane_count,
		    TRAIN_SET_VSWING_ARGS(intel_dp->train_set),
		    TRAIN_SET_PREEMPH_ARGS(intel_dp->train_set),
		    intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));

	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		encoder->set_signal_levels(encoder, crtc_state);
}

/*
 * Reset the train_set levels to zero, apply them on the source and start
 * transmitting @dp_train_pat. Returns true if the DPCD write succeeded.
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  enum drm_dp_phy dp_phy,
			  u8 dp_train_pat)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
	return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat);
}

/*
 * Apply the current train_set levels on the source and write them to the
 * DP_TRAINING_LANEx_SET registers of @dp_phy. Returns true if all lane
 * values were written.
 */
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *crtc_state,
			   enum drm_dp_phy dp_phy)
{
	int reg = dp_phy == DP_PHY_DPRX ?
			    DP_TRAINING_LANE0_SET :
			    DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
	int ret;

	intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);

	ret = drm_dp_dpcd_write(&intel_dp->aux, reg,
				intel_dp->train_set, crtc_state->lane_count);

	return ret == crtc_state->lane_count;
}

/*
 * Return whether every enabled lane has reached the maximum supported
 * vswing/pre-emphasis combination (see the FIXME below for the exact
 * criteria used).
 */
static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp,
					     const struct intel_crtc_state *crtc_state)
{
	int lane;

	/*
	 * FIXME: The DP spec is very confusing here, also the Link CTS
	 * spec seems to have self contradicting tests around this area.
	 *
	 * In lieu of better ideas let's just stop when we've reached the
	 * max supported vswing with its max pre-emphasis, which is either
	 * 2+1 or 3+0 depending on whether vswing level 3 is supported or not.
	 */
	for (lane = 0; lane < crtc_state->lane_count; lane++) {
		u8 v = (intel_dp->train_set[lane] & DP_TRAIN_VOLTAGE_SWING_MASK) >>
			DP_TRAIN_VOLTAGE_SWING_SHIFT;
		u8 p = (intel_dp->train_set[lane] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT;

		if ((intel_dp->train_set[lane] & DP_TRAIN_MAX_SWING_REACHED) == 0)
			return false;

		if (v + p != 3)
			return false;
	}

	return true;
}

/*
 * Prepare link training by configuring the link parameters. On DDI platforms
 * also enable the port here.
 */
static bool
intel_dp_prepare_link_train(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 link_config[2];
	u8 link_bw, rate_select;

	if (intel_dp->prepare_link_retrain)
		intel_dp->prepare_link_retrain(intel_dp, crtc_state);

	intel_dp_compute_rate(intel_dp, crtc_state->port_clock,
			      &link_bw, &rate_select);

	/* link_bw == 0 means the rate is selected via DP_LINK_RATE_SET instead. */
	if (link_bw)
		drm_dbg_kms(&i915->drm,
			    "Using LINK_BW_SET value %02x\n", link_bw);
	else
		drm_dbg_kms(&i915->drm,
			    "Using LINK_RATE_SET value %02x\n", rate_select);

	/* Write the link configuration data */
	link_config[0] = link_bw;
	link_config[1] = crtc_state->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);

	/* eDP 1.4 rate select method. */
	if (!link_bw)
		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
				  &rate_select, 1);

	/* Downspread control plus 8b/10b vs. 128b/132b channel coding select. */
	link_config[0] = crtc_state->vrr.enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
	link_config[1] = intel_dp_is_uhbr(crtc_state) ?
		DP_SET_ANSI_128B132B : DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

	return true;
}

/* Wait the clock recovery delay required by @dp_phy (DPRX or LTTPR). */
static void intel_dp_link_training_clock_recovery_delay(struct intel_dp *intel_dp,
							enum drm_dp_phy dp_phy)
{
	if (dp_phy == DP_PHY_DPRX)
		drm_dp_link_train_clock_recovery_delay(&intel_dp->aux, intel_dp->dpcd);
	else
		drm_dp_lttpr_link_train_clock_recovery_delay();
}

576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594
static bool intel_dp_adjust_request_changed(int lane_count,
					    const u8 old_link_status[DP_LINK_STATUS_SIZE],
					    const u8 new_link_status[DP_LINK_STATUS_SIZE])
{
	int lane;

	for (lane = 0; lane < lane_count; lane++) {
		u8 old = drm_dp_get_adjust_request_voltage(old_link_status, lane) |
			drm_dp_get_adjust_request_pre_emphasis(old_link_status, lane);
		u8 new = drm_dp_get_adjust_request_voltage(new_link_status, lane) |
			drm_dp_get_adjust_request_pre_emphasis(new_link_status, lane);

		if (old != new)
			return true;
	}

	return false;
}

/*
 * Perform the link training clock recovery phase on the given DP PHY using
 * training pattern 1.
 *
 * Returns true once the sink reports clock recovery OK; false after
 * exhausting the retry budget, after 5 identical voltage requests, after
 * max vswing was reached, or on any AUX failure.
 */
static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
				      const struct intel_crtc_state *crtc_state,
				      enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 old_link_status[DP_LINK_STATUS_SIZE] = {};
	int voltage_tries, cr_tries, max_cr_tries;
	bool max_vswing_reached = false;

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		drm_err(&i915->drm, "failed to enable link training\n");
		return false;
	}

	/*
	 * The DP 1.4 spec defines the max clock recovery retries value
	 * as 10 but for pre-DP 1.4 devices we set a very tolerant
	 * retry limit of 80 (4 voltage levels x 4 preemphasis levels x
	 * x 5 identical voltage retries). Since the previous specs didn't
	 * define a limit and created the possibility of an infinite loop
	 * we want to prevent any sync from triggering that corner case.
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
		max_cr_tries = 10;
	else
		max_cr_tries = 80;

	voltage_tries = 1;
	for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
		u8 link_status[DP_LINK_STATUS_SIZE];

		intel_dp_link_training_clock_recovery_delay(intel_dp, dp_phy);

		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
						     link_status) < 0) {
			drm_err(&i915->drm, "failed to get link status\n");
			return false;
		}

		if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) {
			drm_dbg_kms(&i915->drm, "clock recovery OK\n");
			return true;
		}

		if (voltage_tries == 5) {
			drm_dbg_kms(&i915->drm,
				    "Same voltage tried 5 times\n");
			return false;
		}

		if (max_vswing_reached) {
			drm_dbg_kms(&i915->drm, "Max Voltage Swing reached\n");
			return false;
		}

		/* Update training set as requested by target */
		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
					  link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
			drm_err(&i915->drm,
				"failed to update link training\n");
			return false;
		}

		/* Count consecutive iterations requesting the same adjustment. */
		if (!intel_dp_adjust_request_changed(crtc_state->lane_count,
						     old_link_status, link_status))
			++voltage_tries;
		else
			voltage_tries = 1;

		memcpy(old_link_status, link_status, sizeof(link_status));

		if (intel_dp_link_max_vswing_reached(intel_dp, crtc_state))
			max_vswing_reached = true;

	}
	drm_err(&i915->drm,
		"Failed clock recovery %d times, giving up!\n", max_cr_tries);
	return false;
}

/*
 * Pick Training Pattern Sequence (TPS) for channel equalization. 128b/132b TPS2
 * for UHBR+, TPS4 for HBR3 or for 1.4 devices that support it, TPS3 for HBR2 or
 * 1.2 devices that support it, TPS2 otherwise.
 */
static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
				     const struct intel_crtc_state *crtc_state,
				     enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	bool source_tps3, sink_tps3, source_tps4, sink_tps4;

	/* UHBR+ use separate 128b/132b TPS2 */
	if (intel_dp_is_uhbr(crtc_state))
		return DP_TRAINING_PATTERN_2;

	/*
	 * TPS4 support is mandatory for all downstream devices that
	 * support HBR3. There are no known eDP panels that support
	 * TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 specification.
	 * LTTPRs must support TPS4.
	 */
	source_tps4 = intel_dp_source_supports_tps4(i915);
	sink_tps4 = dp_phy != DP_PHY_DPRX ||
		    drm_dp_tps4_supported(intel_dp->dpcd);
	if (source_tps4 && sink_tps4) {
		return DP_TRAINING_PATTERN_4;
	} else if (crtc_state->port_clock == 810000) {
		if (!source_tps4)
			drm_dbg_kms(&i915->drm,
				    "8.1 Gbps link rate without source TPS4 support\n");
		if (!sink_tps4)
			drm_dbg_kms(&i915->drm,
				    "8.1 Gbps link rate without sink TPS4 support\n");
	}

	/*
	 * TPS3 support is mandatory for downstream devices that
	 * support HBR2. However, not all sinks follow the spec.
	 */
	source_tps3 = intel_dp_source_supports_tps3(i915);
	sink_tps3 = dp_phy != DP_PHY_DPRX ||
		    drm_dp_tps3_supported(intel_dp->dpcd);
	if (source_tps3 && sink_tps3) {
		return  DP_TRAINING_PATTERN_3;
	} else if (crtc_state->port_clock >= 540000) {
		if (!source_tps3)
			drm_dbg_kms(&i915->drm,
				    ">=5.4/6.48 Gbps link rate without source TPS3 support\n");
		if (!sink_tps3)
			drm_dbg_kms(&i915->drm,
				    ">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
	}

	return DP_TRAINING_PATTERN_2;
}

/*
 * Wait the channel equalization delay required by @dp_phy, derived from
 * the DPRX DPCD caps or from the LTTPR's own PHY caps as appropriate.
 */
static void
intel_dp_link_training_channel_equalization_delay(struct intel_dp *intel_dp,
						  enum drm_dp_phy dp_phy)
{
	if (dp_phy == DP_PHY_DPRX) {
		drm_dp_link_train_channel_eq_delay(&intel_dp->aux, intel_dp->dpcd);
	} else {
		const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);

		drm_dp_lttpr_link_train_channel_eq_delay(&intel_dp->aux, phy_caps);
	}
}

/*
 * Perform the link training channel equalization phase on the given DP PHY
 * using one of training pattern 2, 3 or 4 depending on the source and
 * sink capabilities.
 *
 * Returns true if channel equalization succeeded within 5 attempts while
 * clock recovery remained locked, false otherwise.
 */
static bool
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
					    const struct intel_crtc_state *crtc_state,
					    enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int tries;
	u32 training_pattern;
	u8 link_status[DP_LINK_STATUS_SIZE];
	bool channel_eq = false;

	training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy);
	/* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
	if (training_pattern != DP_TRAINING_PATTERN_4)
		training_pattern |= DP_LINK_SCRAMBLING_DISABLE;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy,
				     training_pattern)) {
		drm_err(&i915->drm, "failed to start channel equalization\n");
		return false;
	}

	for (tries = 0; tries < 5; tries++) {
		intel_dp_link_training_channel_equalization_delay(intel_dp,
								  dp_phy);
		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
						     link_status) < 0) {
			drm_err(&i915->drm,
				"failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status,
					      crtc_state->lane_count)) {
			intel_dp_dump_link_status(&i915->drm, link_status);
			drm_dbg_kms(&i915->drm,
				    "Clock recovery check failed, cannot "
				    "continue channel equalization\n");
			break;
		}

		if (drm_dp_channel_eq_ok(link_status,
					 crtc_state->lane_count)) {
			channel_eq = true;
			drm_dbg_kms(&i915->drm, "Channel EQ done. DP Training "
				    "successful\n");
			break;
		}

		/* Update training set as requested by target */
		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
					  link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
			drm_err(&i915->drm,
				"failed to update link training\n");
			break;
		}
	}

	/* Try 5 times, else fail and try at lower BW */
	if (tries == 5) {
		intel_dp_dump_link_status(&i915->drm, link_status);
		drm_dbg_kms(&i915->drm,
			    "Channel equalization failed 5 times\n");
	}

	return channel_eq;
}

/*
 * Disable the training pattern in the DPCD of @dp_phy. Returns true if
 * the DPCD write succeeded.
 */
static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
						   enum drm_dp_phy dp_phy)
{
	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
	u8 val = DP_TRAINING_PATTERN_DISABLE;

	return drm_dp_dpcd_write(&intel_dp->aux, reg, &val, 1) == 1;
}

/**
 * intel_dp_stop_link_train - stop link training
 * @intel_dp: DP struct
 * @crtc_state: state for CRTC attached to the encoder
 *
 * Stop the link training of the @intel_dp port, disabling the training
 * pattern in the sink's DPCD, and disabling the test pattern symbol
 * generation on the port.
 *
 * What symbols are output on the port after this point is
 * platform specific: On DDI/VLV/CHV platforms it will be the idle pattern
 * with the pipe being disabled, on older platforms it's HW specific if/how an
 * idle pattern is generated, as the pipe is already enabled here for those.
 *
 * This function must be called after intel_dp_start_link_train().
 */
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	intel_dp->link_trained = true;

	intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
	intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX,
					       DP_TRAINING_PATTERN_DISABLE);
}

/*
 * Run the clock recovery and channel equalization phases of link
 * training on @dp_phy, logging the overall result. Returns true if both
 * phases succeeded.
 */
static bool
intel_dp_link_train_phy(struct intel_dp *intel_dp,
			const struct intel_crtc_state *crtc_state,
			enum drm_dp_phy dp_phy)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	char phy_name[10];
	bool ret = false;

	if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy))
		goto out;

	if (!intel_dp_link_training_channel_equalization(intel_dp, crtc_state, dp_phy))
		goto out;

	ret = true;

out:
	drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
		    "[CONNECTOR:%d:%s] Link Training %s at link rate = %d, lane count = %d, at %s\n",
		    intel_connector->base.base.id,
		    intel_connector->base.name,
		    ret ? "passed" : "failed",
		    crtc_state->port_clock, crtc_state->lane_count,
		    intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));

	return ret;
}

/*
 * Handle a link training failure: with HOBL active just mark it failed so
 * it's not used for later attempts; otherwise compute reduced link rate /
 * lane count fallback values, bailing out if none are left. Then schedule
 * a hotplug uevent so userspace retries the modeset.
 */
static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
						     const struct intel_crtc_state *crtc_state)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	if (intel_dp->hobl_active) {
		drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
			    "Link Training failed with HOBL active, not enabling it from now on");
		intel_dp->hobl_failed = true;
	} else if (intel_dp_get_link_train_fallback_values(intel_dp,
							   crtc_state->port_clock,
							   crtc_state->lane_count)) {
		/* No further fallback parameters available, give up. */
		return;
	}

	/* Schedule a Hotplug Uevent to userspace to start modeset */
	schedule_work(&intel_connector->modeset_retry_work);
}
/* Perform the link training on all LTTPRs and the DPRX on a link. */
static bool
intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
			     const struct intel_crtc_state *crtc_state,
			     int lttpr_count)
{
	bool ret = true;
	int i;

	intel_dp_prepare_link_train(intel_dp, crtc_state);

	/* Train each LTTPR, disabling its training pattern before moving on. */
	for (i = lttpr_count - 1; i >= 0; i--) {
		enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i);

		ret = intel_dp_link_train_phy(intel_dp, crtc_state, dp_phy);
		intel_dp_disable_dpcd_training_pattern(intel_dp, dp_phy);

		if (!ret)
			break;
	}

	/* Finally train the DPRX itself. */
	if (ret)
		ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);

	if (intel_dp->set_idle_link_train)
		intel_dp->set_idle_link_train(intel_dp, crtc_state);

	return ret;
}

/**
 * intel_dp_start_link_train - start link training
 * @intel_dp: DP struct
 * @crtc_state: state for CRTC attached to the encoder
 *
 * Start the link training of the @intel_dp port, scheduling a fallback
 * retraining with reduced link rate/lane parameters if the link training
 * fails.
 * After calling this function intel_dp_stop_link_train() must be called.
 */
void intel_dp_start_link_train(struct intel_dp *intel_dp,
			       const struct intel_crtc_state *crtc_state)
{
	/*
	 * TODO: Reiniting LTTPRs here won't be needed once proper connector
	 * HW state readout is added.
	 */
	int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);

	if (lttpr_count < 0)
		/* Still continue with enabling the port and link training. */
		lttpr_count = 0;

	if (!intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count))
		intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);
}