// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_dp_mst.h"
#include "intel_tc.h"

static const char *tc_port_mode_name(enum tc_port_mode mode)
13 14
{
	static const char * const names[] = {
15 16
		[TC_PORT_TBT_ALT] = "tbt-alt",
		[TC_PORT_DP_ALT] = "dp-alt",
17 18 19
		[TC_PORT_LEGACY] = "legacy",
	};

20 21
	if (WARN_ON(mode >= ARRAY_SIZE(names)))
		mode = TC_PORT_TBT_ALT;
22

23
	return names[mode];
24 25
}

26 27 28
/*
 * Look up which FIA (Flexi IO Adapter) instance and which per-FIA port
 * index service this TC port and cache them in @dig_port.
 */
static void
tc_port_load_fia_params(struct drm_i915_private *i915,
			struct intel_digital_port *dig_port)
{
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_port_to_tc(i915, port);
	u32 modular_fia;

	if (INTEL_INFO(i915)->display.has_modular_fia) {
		modular_fia = intel_uncore_read(&i915->uncore,
						PORT_TX_DFLEXDPSP(FIA1));
		modular_fia &= MODULAR_FIA_MASK;
	} else {
		modular_fia = 0;
	}

	/*
	 * Each Modular FIA instance houses 2 TC ports. In SOC that has more
	 * than two TC ports, there are multiple instances of Modular FIA.
	 */
	if (modular_fia) {
		dig_port->tc_phy_fia = tc_port / 2;
		dig_port->tc_phy_fia_idx = tc_port % 2;
	} else {
		dig_port->tc_phy_fia = FIA1;
		dig_port->tc_phy_fia_idx = tc_port;
	}
}

/*
 * Read the DP lane assignment for this port from the FIA DFLEXDPSP
 * register and return it shifted down to a plain per-lane bitmask.
 */
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 lane_mask;

	lane_mask = intel_uncore_read(uncore,
				      PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));

	/* All-ones reads back when the PHY is powered down (TCCOLD). */
	drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);

	lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
	return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
}

/*
 * Read this port's USB Type-C pin assignment from the FIA DFLEXPA1
 * register and return it shifted down to the raw field value.
 */
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 pin_mask;

	pin_mask = intel_uncore_read(uncore,
				     PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));

	/* All-ones reads back when the PHY is powered down (TCCOLD). */
	drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);

	return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
	       DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
}

/*
 * Return the maximum number of DP lanes the port can use. Outside of
 * DP-alt mode all 4 lanes are available; in DP-alt mode the count is
 * derived from the lane assignment negotiated for the connector.
 */
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref;
	u32 lane_mask;

	if (dig_port->tc_mode != TC_PORT_DP_ALT)
		return 4;

	/* Hold display-core power so the FIA register read is valid. */
	lane_mask = 0;
	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		lane_mask = intel_tc_port_get_lane_mask(dig_port);

	switch (lane_mask) {
	default:
		MISSING_CASE(lane_mask);
		/* fall-through */
	case 0x1:
	case 0x2:
	case 0x4:
	case 0x8:
		return 1;
	case 0x3:
	case 0xc:
		return 2;
	case 0xf:
		return 4;
	}
}

/*
 * Program the FIA with the number of DP lanes the port will use, taking
 * lane reversal into account (lane reversal is only valid in legacy mode).
 */
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
				      int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	drm_WARN_ON(&i915->drm,
		    lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);

	/* Read-modify-write only this port's lane-enable field. */
	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
	val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);

	switch (required_lanes) {
	case 1:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3(dig_port->tc_phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML0(dig_port->tc_phy_fia_idx);
		break;
	case 2:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3_2(dig_port->tc_phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML1_0(dig_port->tc_phy_fia_idx);
		break;
	case 4:
		val |= DFLEXDPMLE1_DPMLETC_ML3_0(dig_port->tc_phy_fia_idx);
		break;
	default:
		MISSING_CASE(required_lanes);
	}

	intel_uncore_write(uncore,
			   PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val);
}

/*
 * Sanity-check the VBT-provided legacy-port flag against the live HPD
 * status and flip the flag if they disagree; the live status is taken
 * as authoritative.
 */
static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
				      u32 live_status_mask)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	u32 valid_hpd_mask;

	if (dig_port->tc_legacy_port)
		valid_hpd_mask = BIT(TC_PORT_LEGACY);
	else
		valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
				 BIT(TC_PORT_TBT_ALT);

	/* Nothing to fix up if the live modes are consistent with the flag. */
	if (!(live_status_mask & ~valid_hpd_mask))
		return;

	/* If live status mismatches the VBT flag, trust the live status. */
	drm_err(&i915->drm,
		"Port %s: live status %08x mismatches the legacy port flag, fix flag\n",
		dig_port->tc_port_name, live_status_mask);

	dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
}

/*
 * Return a bitmask of BIT(TC_PORT_*) modes in which a sink is currently
 * detected live on the port; at most one bit is expected to be set.
 */
static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
	struct intel_uncore *uncore = &i915->uncore;
	u32 mask = 0;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));

	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, nothing connected\n",
			    dig_port->tc_port_name);
		return mask;
	}

	if (val & TC_LIVE_STATE_TBT(dig_port->tc_phy_fia_idx))
		mask |= BIT(TC_PORT_TBT_ALT);
	if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx))
		mask |= BIT(TC_PORT_DP_ALT);

	/* Legacy (non-TC) hotplug is reported via the south display ISR. */
	if (intel_uncore_read(uncore, SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port))
		mask |= BIT(TC_PORT_LEGACY);

	/* The sink can be connected only in a single mode. */
	if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1))
		tc_port_fixup_legacy_flag(dig_port, mask);

	return mask;
}

static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
208 209
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
210 211
	u32 val;

A
Anusha Srivatsa 已提交
212 213
	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
214
	if (val == 0xffffffff) {
215 216 217
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assuming not complete\n",
			    dig_port->tc_port_name);
218 219
		return false;
	}
220

221
	return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx);
222 223
}

224
/*
 * Set or clear the PHY's safe mode: disabling safe mode takes ownership
 * of the PHY for the display controller, enabling it hands the PHY back
 * to the SoC. Returns false if the PHY is inaccessible (TCCOLD).
 */
static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
				     bool enable)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
			    dig_port->tc_port_name,
			      enableddisabled(enable));

		return false;
	}

	val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
	if (!enable)
		val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);

	intel_uncore_write(uncore,
			   PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);

	/* Entering safe mode should clear the status-complete flag; wait. */
	if (enable && wait_for(!icl_tc_phy_status_complete(dig_port), 10))
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY complete clear timed out\n",
			    dig_port->tc_port_name);

	return true;
}

/*
 * Check whether the PHY is in safe mode, i.e. owned by the SoC rather
 * than the display controller. An inaccessible PHY (TCCOLD) is reported
 * as being in safe mode.
 */
static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assume safe mode\n",
			    dig_port->tc_port_name);
		return true;
	}

	return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx));
}

/*
 * This function implements the first part of the Connect Flow described by our
 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
 * lanes, EDID, etc) is done as needed in the typical places.
 *
 * Unlike the other ports, type-C ports are not available to use as soon as we
 * get a hotplug. The type-C PHYs can be shared between multiple controllers:
 * display, USB, etc. As a result, handshaking through FIA is required around
 * connect and disconnect to cleanly transfer ownership with the controller and
 * set the type-C power state.
 *
 * On success dig_port->tc_mode is set to LEGACY or DP_ALT; on any failure
 * the port falls back to TBT_ALT mode (which needs no PHY ownership).
 */
static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
			       int required_lanes)
{
	int max_lanes;

	if (!icl_tc_phy_status_complete(dig_port)) {
		DRM_DEBUG_KMS("Port %s: PHY not ready\n",
			      dig_port->tc_port_name);
		goto out_set_tbt_alt_mode;
	}

	/* Take PHY ownership; a legacy port failing this is unexpected. */
	if (!icl_tc_phy_set_safe_mode(dig_port, false) &&
	    !WARN_ON(dig_port->tc_legacy_port))
		goto out_set_tbt_alt_mode;

	max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
	if (dig_port->tc_legacy_port) {
		WARN_ON(max_lanes != 4);
		dig_port->tc_mode = TC_PORT_LEGACY;

		return;
	}

	/*
	 * Now we have to re-check the live state, in case the port recently
	 * became disconnected. Not necessary for legacy mode.
	 */
	if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
		DRM_DEBUG_KMS("Port %s: PHY sudden disconnect\n",
			      dig_port->tc_port_name);
		goto out_set_safe_mode;
	}

	if (max_lanes < required_lanes) {
		DRM_DEBUG_KMS("Port %s: PHY max lanes %d < required lanes %d\n",
			      dig_port->tc_port_name,
			      max_lanes, required_lanes);
		goto out_set_safe_mode;
	}

	dig_port->tc_mode = TC_PORT_DP_ALT;

	return;

out_set_safe_mode:
	icl_tc_phy_set_safe_mode(dig_port, true);
out_set_tbt_alt_mode:
	dig_port->tc_mode = TC_PORT_TBT_ALT;
}

/*
 * See the comment at the connect function. This implements the Disconnect
 * Flow.
 */
340
static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
341
{
342 343
	switch (dig_port->tc_mode) {
	case TC_PORT_LEGACY:
344 345
		/* Nothing to do, we never disconnect from legacy mode */
		break;
346 347 348 349 350 351 352 353 354
	case TC_PORT_DP_ALT:
		icl_tc_phy_set_safe_mode(dig_port, true);
		dig_port->tc_mode = TC_PORT_TBT_ALT;
		break;
	case TC_PORT_TBT_ALT:
		/* Nothing to do, we stay in TBT-alt mode */
		break;
	default:
		MISSING_CASE(dig_port->tc_mode);
355
	}
356
}
357

358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397
static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
{
	if (!icl_tc_phy_status_complete(dig_port)) {
		DRM_DEBUG_KMS("Port %s: PHY status not complete\n",
			      dig_port->tc_port_name);
		return dig_port->tc_mode == TC_PORT_TBT_ALT;
	}

	if (icl_tc_phy_is_in_safe_mode(dig_port)) {
		DRM_DEBUG_KMS("Port %s: PHY still in safe mode\n",
			      dig_port->tc_port_name);

		return false;
	}

	return dig_port->tc_mode == TC_PORT_DP_ALT ||
	       dig_port->tc_mode == TC_PORT_LEGACY;
}

/*
 * Derive the mode the port is currently in from the HW state, without
 * changing it. Used to sync up the SW state on driver load and resume.
 */
static enum tc_port_mode
intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
{
	u32 live_status_mask = tc_port_live_status_mask(dig_port);
	bool in_safe_mode = icl_tc_phy_is_in_safe_mode(dig_port);
	enum tc_port_mode mode;

	if (in_safe_mode || WARN_ON(!icl_tc_phy_status_complete(dig_port)))
		return TC_PORT_TBT_ALT;

	mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
	if (live_status_mask) {
		/* At most one bit is set, so fls() picks the live mode. */
		enum tc_port_mode live_mode = fls(live_status_mask) - 1;

		if (!WARN_ON(live_mode == TC_PORT_TBT_ALT))
			mode = live_mode;
	}

	return mode;
}

/*
 * Determine the mode the port should be switched to, based on the live
 * sink status and the legacy-port flag.
 */
static enum tc_port_mode
intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
{
	u32 live_status_mask = tc_port_live_status_mask(dig_port);

	if (live_status_mask)
		return fls(live_status_mask) - 1;

	/* No live sink: legacy ports stay connected, others park in TBT-alt. */
	return icl_tc_phy_status_complete(dig_port) &&
	       dig_port->tc_legacy_port ? TC_PORT_LEGACY :
					  TC_PORT_TBT_ALT;
}

/*
 * Disconnect the PHY and reconnect it in the port's current target mode.
 * The port's AUX power domain must already be released; this is checked
 * after flushing any pending async power-domain work.
 */
static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
				     int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port_mode old_tc_mode = dig_port->tc_mode;

	intel_display_power_flush_work(i915);
	drm_WARN_ON(&i915->drm,
		    intel_display_power_is_enabled(i915,
					intel_aux_power_domain(dig_port)));

	icl_tc_phy_disconnect(dig_port);
	icl_tc_phy_connect(dig_port, required_lanes);

	drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
		    dig_port->tc_port_name,
		    tc_port_mode_name(old_tc_mode),
		    tc_port_mode_name(dig_port->tc_mode));
}

/*
 * Initialize the link refcount from the number of active links found
 * during state sanitization; must only be done with a zero refcount.
 */
static void
intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
				 int refcount)
{
	WARN_ON(dig_port->tc_link_refcount);
	dig_port->tc_link_refcount = refcount;
}

/*
 * Sync up the SW port mode with the HW state on driver load and system
 * resume. A mode with active links is kept as-is and pins the mode via
 * the link refcount; otherwise legacy ports get (re)connected.
 */
void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
{
	struct intel_encoder *encoder = &dig_port->base;
	int active_links = 0;

	mutex_lock(&dig_port->tc_lock);

	dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
	if (dig_port->dp.is_mst)
		active_links = intel_dp_mst_encoder_active_links(dig_port);
	else if (encoder->base.crtc)
		active_links = to_intel_crtc(encoder->base.crtc)->active;

	if (active_links) {
		if (!icl_tc_phy_is_connected(dig_port))
			DRM_DEBUG_KMS("Port %s: PHY disconnected with %d active link(s)\n",
				      dig_port->tc_port_name, active_links);
		intel_tc_port_link_init_refcount(dig_port, active_links);

		goto out;
	}

	/* Legacy ports should be connected (owned) unconditionally. */
	if (dig_port->tc_legacy_port)
		icl_tc_phy_connect(dig_port, 1);

out:
	DRM_DEBUG_KMS("Port %s: sanitize mode (%s)\n",
		      dig_port->tc_port_name,
		      tc_port_mode_name(dig_port->tc_mode));

	mutex_unlock(&dig_port->tc_lock);
}

static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
{
	return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode;
475 476 477 478 479 480 481 482 483 484 485 486 487 488
}

/*
 * The type-C ports are different because even when they are connected, they may
 * not be available/usable by the graphics driver: see the comment on
 * icl_tc_phy_connect(). So in our driver instead of adding the additional
 * concept of "usable" and make everything check for "connected and usable" we
 * define a port as "connected" when it is not only connected, but also when it
 * is usable by the rest of the driver. That maintains the old assumption that
 * connected ports are usable, and avoids exposing to the users objects they
 * can't really use.
 */
bool intel_tc_port_connected(struct intel_digital_port *dig_port)
{
	bool is_connected;

	intel_tc_port_lock(dig_port);
	/* Connected == a sink is live in the port's current mode. */
	is_connected = tc_port_live_status_mask(dig_port) &
		       BIT(dig_port->tc_mode);
	intel_tc_port_unlock(dig_port);

	return is_connected;
}

/*
 * Lock the port, first switching it to its target mode if no link holds
 * the current mode. A display-core power reference is taken and stashed
 * until unlock, keeping the PHY out of TCCOLD while locked.
 */
static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
				 int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get(i915, POWER_DOMAIN_DISPLAY_CORE);

	mutex_lock(&dig_port->tc_lock);

	if (!dig_port->tc_link_refcount &&
	    intel_tc_port_needs_reset(dig_port))
		intel_tc_port_reset_mode(dig_port, required_lanes);

	drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
	dig_port->tc_lock_wakeref = wakeref;
}

/* Lock the port without requiring a particular lane count. */
void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
	__intel_tc_port_lock(dig_port, 1);
}

/*
 * Unlock the port, releasing the display-core power reference taken at
 * lock time asynchronously.
 */
void intel_tc_port_unlock(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref = fetch_and_zero(&dig_port->tc_lock_wakeref);

	mutex_unlock(&dig_port->tc_lock);

	intel_display_power_put_async(i915, POWER_DOMAIN_DISPLAY_CORE,
				      wakeref);
}

/* Return true if the port's mode is pinned by a lock or an active link. */
bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
{
	return mutex_is_locked(&dig_port->tc_lock) ||
	       dig_port->tc_link_refcount;
}

/*
 * Pin the port's mode for the duration of a link, making sure it can
 * provide the required lane count.
 */
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
			    int required_lanes)
{
	__intel_tc_port_lock(dig_port, required_lanes);
	dig_port->tc_link_refcount++;
	intel_tc_port_unlock(dig_port);
}

/* Drop a link reference taken by intel_tc_port_get_link(). */
void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
	mutex_lock(&dig_port->tc_lock);
	dig_port->tc_link_refcount--;
	mutex_unlock(&dig_port->tc_lock);
}

/*
 * One-time init of the TC-port state. @is_legacy comes from the VBT and
 * may be fixed up later based on the live HPD status (see
 * tc_port_fixup_legacy_flag()).
 */
void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_port_to_tc(i915, port);

	if (drm_WARN_ON(&i915->drm, tc_port == PORT_TC_NONE))
		return;

	snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
		 "%c/TC#%d", port_name(port), tc_port + 1);

	mutex_init(&dig_port->tc_lock);
	dig_port->tc_legacy_port = is_legacy;
	dig_port->tc_link_refcount = 0;
	tc_port_load_fia_params(i915, dig_port);
}