// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_dp_mst.h"
#include "intel_tc.h"

12
static const char *tc_port_mode_name(enum tc_port_mode mode)
13 14
{
	static const char * const names[] = {
15 16
		[TC_PORT_TBT_ALT] = "tbt-alt",
		[TC_PORT_DP_ALT] = "dp-alt",
17 18 19
		[TC_PORT_LEGACY] = "legacy",
	};

20 21
	if (WARN_ON(mode >= ARRAY_SIZE(names)))
		mode = TC_PORT_TBT_ALT;
22

23
	return names[mode];
24 25
}

26 27 28
static void
tc_port_load_fia_params(struct drm_i915_private *i915,
			struct intel_digital_port *dig_port)
A
Anusha Srivatsa 已提交
29
{
30 31 32 33 34 35 36 37 38 39 40
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_port_to_tc(i915, port);
	u32 modular_fia;

	if (INTEL_INFO(i915)->display.has_modular_fia) {
		modular_fia = intel_uncore_read(&i915->uncore,
						PORT_TX_DFLEXDPSP(FIA1));
		modular_fia &= MODULAR_FIA_MASK;
	} else {
		modular_fia = 0;
	}
A
Anusha Srivatsa 已提交
41 42 43 44 45

	/*
	 * Each Modular FIA instance houses 2 TC ports. In SOC that has more
	 * than two TC ports, there are multiple instances of Modular FIA.
	 */
46 47 48 49 50 51 52
	if (modular_fia) {
		dig_port->tc_phy_fia = tc_port / 2;
		dig_port->tc_phy_fia_idx = tc_port % 2;
	} else {
		dig_port->tc_phy_fia = FIA1;
		dig_port->tc_phy_fia_idx = tc_port;
	}
A
Anusha Srivatsa 已提交
53 54
}

55
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
56
{
57 58
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
59 60
	u32 lane_mask;

A
Anusha Srivatsa 已提交
61 62
	lane_mask = intel_uncore_read(uncore,
				      PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
63

64
	drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
65

66 67
	lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
	return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
68 69
}

70 71 72 73 74 75 76 77 78
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 pin_mask;

	pin_mask = intel_uncore_read(uncore,
				     PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));

79
	drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
80 81 82 83 84

	return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
	       DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
}

85 86
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
{
87
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
88
	intel_wakeref_t wakeref;
89
	u32 lane_mask;
90

91
	if (dig_port->tc_mode != TC_PORT_DP_ALT)
92 93
		return 4;

94
	lane_mask = 0;
95
	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
96
		lane_mask = intel_tc_port_get_lane_mask(dig_port);
97

98
	switch (lane_mask) {
99
	default:
100
		MISSING_CASE(lane_mask);
101 102 103 104 105
		/* fall-through */
	case 0x1:
	case 0x2:
	case 0x4:
	case 0x8:
106
		return 1;
107 108
	case 0x3:
	case 0xc:
109
		return 2;
110
	case 0xf:
111 112 113 114
		return 4;
	}
}

115 116 117 118 119 120 121 122
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
				      int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

123 124
	drm_WARN_ON(&i915->drm,
		    lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);
125

A
Anusha Srivatsa 已提交
126 127
	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
128
	val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);
129 130 131

	switch (required_lanes) {
	case 1:
132 133 134
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3(dig_port->tc_phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML0(dig_port->tc_phy_fia_idx);
135 136
		break;
	case 2:
137 138 139
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3_2(dig_port->tc_phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML1_0(dig_port->tc_phy_fia_idx);
140 141
		break;
	case 4:
142
		val |= DFLEXDPMLE1_DPMLETC_ML3_0(dig_port->tc_phy_fia_idx);
143 144 145 146 147
		break;
	default:
		MISSING_CASE(required_lanes);
	}

A
Anusha Srivatsa 已提交
148 149
	intel_uncore_write(uncore,
			   PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val);
150 151
}

152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174
static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
				      u32 live_status_mask)
{
	u32 valid_hpd_mask;

	if (dig_port->tc_legacy_port)
		valid_hpd_mask = BIT(TC_PORT_LEGACY);
	else
		valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
				 BIT(TC_PORT_TBT_ALT);

	if (!(live_status_mask & ~valid_hpd_mask))
		return;

	/* If live status mismatches the VBT flag, trust the live status. */
	DRM_ERROR("Port %s: live status %08x mismatches the legacy port flag, fix flag\n",
		  dig_port->tc_port_name, live_status_mask);

	dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
}

static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
175 176 177
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
	struct intel_uncore *uncore = &i915->uncore;
178 179 180
	u32 mask = 0;
	u32 val;

A
Anusha Srivatsa 已提交
181 182
	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
183

184 185 186 187 188 189
	if (val == 0xffffffff) {
		DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, nothing connected\n",
			      dig_port->tc_port_name);
		return mask;
	}

190
	if (val & TC_LIVE_STATE_TBT(dig_port->tc_phy_fia_idx))
191
		mask |= BIT(TC_PORT_TBT_ALT);
192
	if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx))
193 194
		mask |= BIT(TC_PORT_DP_ALT);

195
	if (intel_uncore_read(uncore, SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port))
196 197 198
		mask |= BIT(TC_PORT_LEGACY);

	/* The sink can be connected only in a single mode. */
199
	if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1))
200 201 202 203 204 205 206
		tc_port_fixup_legacy_flag(dig_port, mask);

	return mask;
}

static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
207 208
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
209 210
	u32 val;

A
Anusha Srivatsa 已提交
211 212
	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
213 214 215 216 217
	if (val == 0xffffffff) {
		DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assuming not complete\n",
			      dig_port->tc_port_name);
		return false;
	}
218

219
	return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx);
220 221
}

222
static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
223 224
				     bool enable)
{
225 226
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
227 228
	u32 val;

A
Anusha Srivatsa 已提交
229 230
	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
231 232 233 234 235 236 237
	if (val == 0xffffffff) {
		DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
			      dig_port->tc_port_name,
			      enableddisabled(enable));

		return false;
	}
238

239
	val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
240
	if (!enable)
241
		val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
242

A
Anusha Srivatsa 已提交
243 244
	intel_uncore_write(uncore,
			   PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);
245 246 247 248

	if (enable && wait_for(!icl_tc_phy_status_complete(dig_port), 10))
		DRM_DEBUG_KMS("Port %s: PHY complete clear timed out\n",
			      dig_port->tc_port_name);
249 250

	return true;
251 252
}

253 254
static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
{
255 256
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
257 258
	u32 val;

A
Anusha Srivatsa 已提交
259 260
	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
261 262 263 264 265 266
	if (val == 0xffffffff) {
		DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assume safe mode\n",
			      dig_port->tc_port_name);
		return true;
	}

267
	return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx));
268 269
}

270 271 272 273 274 275 276 277 278 279 280
/*
 * This function implements the first part of the Connect Flow described by our
 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
 * lanes, EDID, etc) is done as needed in the typical places.
 *
 * Unlike the other ports, type-C ports are not available to use as soon as we
 * get a hotplug. The type-C PHYs can be shared between multiple controllers:
 * display, USB, etc. As a result, handshaking through FIA is required around
 * connect and disconnect to cleanly transfer ownership with the controller and
 * set the type-C power state.
 */
281 282
static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
			       int required_lanes)
283
{
284 285
	int max_lanes;

286
	if (!icl_tc_phy_status_complete(dig_port)) {
287 288
		DRM_DEBUG_KMS("Port %s: PHY not ready\n",
			      dig_port->tc_port_name);
289
		goto out_set_tbt_alt_mode;
290 291
	}

292 293 294
	if (!icl_tc_phy_set_safe_mode(dig_port, false) &&
	    !WARN_ON(dig_port->tc_legacy_port))
		goto out_set_tbt_alt_mode;
295

296
	max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
297
	if (dig_port->tc_legacy_port) {
298
		WARN_ON(max_lanes != 4);
299
		dig_port->tc_mode = TC_PORT_LEGACY;
300

301 302
		return;
	}
303 304 305 306 307

	/*
	 * Now we have to re-check the live state, in case the port recently
	 * became disconnected. Not necessary for legacy mode.
	 */
308
	if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
309 310
		DRM_DEBUG_KMS("Port %s: PHY sudden disconnect\n",
			      dig_port->tc_port_name);
311
		goto out_set_safe_mode;
312 313
	}

314 315 316 317 318 319 320
	if (max_lanes < required_lanes) {
		DRM_DEBUG_KMS("Port %s: PHY max lanes %d < required lanes %d\n",
			      dig_port->tc_port_name,
			      max_lanes, required_lanes);
		goto out_set_safe_mode;
	}

321 322 323 324 325 326 327 328
	dig_port->tc_mode = TC_PORT_DP_ALT;

	return;

out_set_safe_mode:
	icl_tc_phy_set_safe_mode(dig_port, true);
out_set_tbt_alt_mode:
	dig_port->tc_mode = TC_PORT_TBT_ALT;
329 330 331 332 333 334
}

/*
 * See the comment at the connect function. This implements the Disconnect
 * Flow.
 */
335
static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
336
{
337 338
	switch (dig_port->tc_mode) {
	case TC_PORT_LEGACY:
339 340
		/* Nothing to do, we never disconnect from legacy mode */
		break;
341 342 343 344 345 346 347 348 349
	case TC_PORT_DP_ALT:
		icl_tc_phy_set_safe_mode(dig_port, true);
		dig_port->tc_mode = TC_PORT_TBT_ALT;
		break;
	case TC_PORT_TBT_ALT:
		/* Nothing to do, we stay in TBT-alt mode */
		break;
	default:
		MISSING_CASE(dig_port->tc_mode);
350
	}
351
}
352

353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392
static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
{
	if (!icl_tc_phy_status_complete(dig_port)) {
		DRM_DEBUG_KMS("Port %s: PHY status not complete\n",
			      dig_port->tc_port_name);
		return dig_port->tc_mode == TC_PORT_TBT_ALT;
	}

	if (icl_tc_phy_is_in_safe_mode(dig_port)) {
		DRM_DEBUG_KMS("Port %s: PHY still in safe mode\n",
			      dig_port->tc_port_name);

		return false;
	}

	return dig_port->tc_mode == TC_PORT_DP_ALT ||
	       dig_port->tc_mode == TC_PORT_LEGACY;
}

/*
 * Derive the port's current mode from the hardware state: safe mode or an
 * incomplete status means TBT-alt, otherwise prefer the live mode reported
 * by the FIA, falling back to the VBT legacy flag.
 */
static enum tc_port_mode
intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
{
	u32 live_status_mask = tc_port_live_status_mask(dig_port);
	bool in_safe_mode = icl_tc_phy_is_in_safe_mode(dig_port);
	enum tc_port_mode mode;

	if (in_safe_mode || WARN_ON(!icl_tc_phy_status_complete(dig_port)))
		return TC_PORT_TBT_ALT;

	mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
	if (live_status_mask) {
		enum tc_port_mode live_mode = fls(live_status_mask) - 1;

		/* TBT-alt can't be live while the PHY is owned by display. */
		if (!WARN_ON(live_mode == TC_PORT_TBT_ALT))
			mode = live_mode;
	}

	return mode;
}

393 394 395 396 397 398 399 400 401 402 403
static enum tc_port_mode
intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
{
	u32 live_status_mask = tc_port_live_status_mask(dig_port);

	if (live_status_mask)
		return fls(live_status_mask) - 1;

	return icl_tc_phy_status_complete(dig_port) &&
	       dig_port->tc_legacy_port ? TC_PORT_LEGACY :
					  TC_PORT_TBT_ALT;
404 405
}

406 407
static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
				     int required_lanes)
408
{
409
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
410
	enum tc_port_mode old_tc_mode = dig_port->tc_mode;
411

412
	intel_display_power_flush_work(i915);
413 414 415
	drm_WARN_ON(&i915->drm,
		    intel_display_power_is_enabled(i915,
					intel_aux_power_domain(dig_port)));
416

417
	icl_tc_phy_disconnect(dig_port);
418
	icl_tc_phy_connect(dig_port, required_lanes);
419

420 421 422 423 424
	DRM_DEBUG_KMS("Port %s: TC port mode reset (%s -> %s)\n",
		      dig_port->tc_port_name,
		      tc_port_mode_name(old_tc_mode),
		      tc_port_mode_name(dig_port->tc_mode));
}
425

426 427 428 429 430 431 432 433
static void
intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
				 int refcount)
{
	WARN_ON(dig_port->tc_link_refcount);
	dig_port->tc_link_refcount = refcount;
}

434 435 436 437 438
void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
{
	struct intel_encoder *encoder = &dig_port->base;
	int active_links = 0;

439 440
	mutex_lock(&dig_port->tc_lock);

441 442 443 444 445 446 447 448 449 450
	dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
	if (dig_port->dp.is_mst)
		active_links = intel_dp_mst_encoder_active_links(dig_port);
	else if (encoder->base.crtc)
		active_links = to_intel_crtc(encoder->base.crtc)->active;

	if (active_links) {
		if (!icl_tc_phy_is_connected(dig_port))
			DRM_DEBUG_KMS("Port %s: PHY disconnected with %d active link(s)\n",
				      dig_port->tc_port_name, active_links);
451 452
		intel_tc_port_link_init_refcount(dig_port, active_links);

453 454 455 456
		goto out;
	}

	if (dig_port->tc_legacy_port)
457
		icl_tc_phy_connect(dig_port, 1);
458 459 460 461 462

out:
	DRM_DEBUG_KMS("Port %s: sanitize mode (%s)\n",
		      dig_port->tc_port_name,
		      tc_port_mode_name(dig_port->tc_mode));
463 464

	mutex_unlock(&dig_port->tc_lock);
465 466
}

467 468 469
static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
{
	return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode;
470 471 472 473 474 475 476 477 478 479 480 481 482 483
}

/*
 * The type-C ports are different because even when they are connected, they may
 * not be available/usable by the graphics driver: see the comment on
 * icl_tc_phy_connect(). So in our driver instead of adding the additional
 * concept of "usable" and make everything check for "connected and usable" we
 * define a port as "connected" when it is not only connected, but also when it
 * is usable by the rest of the driver. That maintains the old assumption that
 * connected ports are usable, and avoids exposing to the users objects they
 * can't really use.
 */
bool intel_tc_port_connected(struct intel_digital_port *dig_port)
{
484 485
	bool is_connected;

486
	intel_tc_port_lock(dig_port);
487 488
	is_connected = tc_port_live_status_mask(dig_port) &
		       BIT(dig_port->tc_mode);
489
	intel_tc_port_unlock(dig_port);
490 491 492 493

	return is_connected;
}

494 495
static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
				 int required_lanes)
496
{
497
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
498 499
	intel_wakeref_t wakeref;

500
	wakeref = intel_display_power_get(i915, POWER_DOMAIN_DISPLAY_CORE);
501

502
	mutex_lock(&dig_port->tc_lock);
503 504 505 506 507

	if (!dig_port->tc_link_refcount &&
	    intel_tc_port_needs_reset(dig_port))
		intel_tc_port_reset_mode(dig_port, required_lanes);

508
	drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
509 510 511 512 513 514
	dig_port->tc_lock_wakeref = wakeref;
}

/* Lock the port's TC state, requiring only a single lane. */
void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
	__intel_tc_port_lock(dig_port, 1);
}

void intel_tc_port_unlock(struct intel_digital_port *dig_port)
{
519
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
520 521 522 523
	intel_wakeref_t wakeref = fetch_and_zero(&dig_port->tc_lock_wakeref);

	mutex_unlock(&dig_port->tc_lock);

524
	intel_display_power_put_async(i915, POWER_DOMAIN_DISPLAY_CORE,
525 526 527
				      wakeref);
}

528 529 530 531 532 533
bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
{
	return mutex_is_locked(&dig_port->tc_lock) ||
	       dig_port->tc_link_refcount;
}

534 535 536 537 538 539 540 541 542 543 544 545
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
			    int required_lanes)
{
	__intel_tc_port_lock(dig_port, required_lanes);
	dig_port->tc_link_refcount++;
	intel_tc_port_unlock(dig_port);
}

/* Drop a link reference taken by intel_tc_port_get_link(). */
void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
	mutex_lock(&dig_port->tc_lock);
	dig_port->tc_link_refcount--;
	mutex_unlock(&dig_port->tc_lock);
}

549 550 551 552 553 554
void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_port_to_tc(i915, port);

555
	if (drm_WARN_ON(&i915->drm, tc_port == PORT_TC_NONE))
556 557 558 559 560
		return;

	snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
		 "%c/TC#%d", port_name(port), tc_port + 1);

561
	mutex_init(&dig_port->tc_lock);
562
	dig_port->tc_legacy_port = is_legacy;
563
	dig_port->tc_link_refcount = 0;
564
	tc_port_load_fia_params(i915, dig_port);
565
}