// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_dp_mst.h"
#include "intel_tc.h"
#include "intel_tc_phy_regs.h"

static const char *tc_port_mode_name(enum tc_port_mode mode)
{
	static const char * const names[] = {
		[TC_PORT_DISCONNECTED] = "disconnected",
		[TC_PORT_TBT_ALT] = "tbt-alt",
		[TC_PORT_DP_ALT] = "dp-alt",
		[TC_PORT_LEGACY] = "legacy",
	};

	if (WARN_ON(mode >= ARRAY_SIZE(names)))
		mode = TC_PORT_DISCONNECTED;

	return names[mode];
}

static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
				  enum tc_port_mode mode)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);

	return intel_phy_is_tc(i915, phy) && dig_port->tc_mode == mode;
}

bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_TBT_ALT);
}

bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_DP_ALT);
}

bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
}

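/*
 * Return true if blocking TC-cold for this port requires holding an AUX
 * power domain (ICL legacy ports and all ADL-P TC ports). For other ports
 * the dedicated TC_COLD_OFF power domain is used instead, see
 * tc_cold_get_power_domain().
 */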
bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	return (DISPLAY_VER(i915) == 11 && dig_port->tc_legacy_port) ||
		IS_ALDERLAKE_P(i915);
}

static enum intel_display_power_domain
tc_cold_get_power_domain(struct intel_digital_port *dig_port, enum tc_port_mode mode)
{
	if (mode == TC_PORT_TBT_ALT || !intel_tc_cold_requires_aux_pw(dig_port))
		return POWER_DOMAIN_TC_COLD_OFF;

	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
}

static intel_wakeref_t
tc_cold_block_in_mode(struct intel_digital_port *dig_port, enum tc_port_mode mode,
		      enum intel_display_power_domain *domain)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	*domain = tc_cold_get_power_domain(dig_port, mode);

	return intel_display_power_get(i915, *domain);
}

static intel_wakeref_t
tc_cold_block(struct intel_digital_port *dig_port, enum intel_display_power_domain *domain)
{
	return tc_cold_block_in_mode(dig_port, dig_port->tc_mode, domain);
}

static void
tc_cold_unblock(struct intel_digital_port *dig_port, enum intel_display_power_domain domain,
		intel_wakeref_t wakeref)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	/*
	 * wakeref == -1 means that an error occurred while saving the
	 * save_depot_stack, but the power reference should still be put.
	 * 0 is an invalid save_depot_stack id, so it can be used to skip
	 * the put for non-legacy TC ports.
	 */
	if (wakeref == 0)
		return;

	intel_display_power_put(i915, domain, wakeref);
}
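
/*
 * Usage sketch for the helpers above (the pattern used throughout this
 * file, not a new interface): accesses to the FIA registers
 * (PORT_TX_DFLEX*) must be done with TC-cold blocked, e.g.:
 *
 *	enum intel_display_power_domain domain;
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = tc_cold_block(dig_port, &domain);
 *	... read/write PORT_TX_DFLEX* registers ...
 *	tc_cold_unblock(dig_port, domain, wakeref);
 */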

static void
assert_tc_cold_blocked(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	bool enabled;

	enabled = intel_display_power_is_enabled(i915,
						 tc_cold_get_power_domain(dig_port,
									  dig_port->tc_mode));
	drm_WARN_ON(&i915->drm, !enabled);
}

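/*
 * Return the DP lane assignment for this port as reported by the FIA, as
 * a 4-bit mask (one bit per ML lane). TC-cold must be blocked by the
 * caller.
 */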
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 lane_mask;

	lane_mask = intel_uncore_read(uncore,
				      PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));

	drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
	assert_tc_cold_blocked(dig_port);

	lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
	return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
}

u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 pin_mask;

	pin_mask = intel_uncore_read(uncore,
				     PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));

	drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
	assert_tc_cold_blocked(dig_port);

	return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
	       DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
}

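/*
 * Return the maximum number of DP lanes available on the PHY: 4 unless
 * the port is in DP-alt mode, in which case it depends on the FIA lane
 * assignment.
 */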
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref;
	u32 lane_mask;

	if (dig_port->tc_mode != TC_PORT_DP_ALT)
		return 4;

	assert_tc_cold_blocked(dig_port);

	lane_mask = 0;
	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		lane_mask = intel_tc_port_get_lane_mask(dig_port);

	switch (lane_mask) {
	default:
		MISSING_CASE(lane_mask);
		fallthrough;
	case 0x1:
	case 0x2:
	case 0x4:
	case 0x8:
		return 1;
	case 0x3:
	case 0xc:
		return 2;
	case 0xf:
		return 4;
	}
}

void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
				      int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	drm_WARN_ON(&i915->drm,
		    lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);

	assert_tc_cold_blocked(dig_port);

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
	val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);

	switch (required_lanes) {
	case 1:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3(dig_port->tc_phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML0(dig_port->tc_phy_fia_idx);
		break;
	case 2:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3_2(dig_port->tc_phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML1_0(dig_port->tc_phy_fia_idx);
		break;
	case 4:
		val |= DFLEXDPMLE1_DPMLETC_ML3_0(dig_port->tc_phy_fia_idx);
		break;
	default:
		MISSING_CASE(required_lanes);
	}

	intel_uncore_write(uncore,
			   PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val);
}

static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
				      u32 live_status_mask)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	u32 valid_hpd_mask;

	if (dig_port->tc_legacy_port)
		valid_hpd_mask = BIT(TC_PORT_LEGACY);
	else
		valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
				 BIT(TC_PORT_TBT_ALT);

	if (!(live_status_mask & ~valid_hpd_mask))
		return;

	/* If live status mismatches the VBT flag, trust the live status. */
	drm_dbg_kms(&i915->drm,
		    "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
		    dig_port->tc_port_name, live_status_mask, valid_hpd_mask);

	dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
}

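/*
 * Return a bitmask of TC_PORT_* modes for which a sink is currently
 * detected, based on the TBT-alt/DP-alt live state reported by HW and the
 * legacy HPD status in SDEISR. At most one bit is expected to be set.
 */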
static u32 icl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin];
	u32 mask = 0;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));

	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, nothing connected\n",
			    dig_port->tc_port_name);
		return mask;
	}

	if (val & TC_LIVE_STATE_TBT(dig_port->tc_phy_fia_idx))
		mask |= BIT(TC_PORT_TBT_ALT);
	if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx))
		mask |= BIT(TC_PORT_DP_ALT);

	if (intel_uncore_read(uncore, SDEISR) & isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	/* The sink can be connected only in a single mode. */
	if (!drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1))
		tc_port_fixup_legacy_flag(dig_port, mask);

	return mask;
}

static u32 adl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
	u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin];
	struct intel_uncore *uncore = &i915->uncore;
	u32 val, mask = 0;

	/*
	 * On ADL-P the HW/FW will wake from TCCOLD to complete the read access of
	 * registers in IOM. Note that this doesn't apply to PHY and FIA
	 * registers.
	 */
	val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
	if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT)
		mask |= BIT(TC_PORT_DP_ALT);
	if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT)
		mask |= BIT(TC_PORT_TBT_ALT);

	if (intel_uncore_read(uncore, SDEISR) & isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	/* The sink can be connected only in a single mode. */
	if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1))
		tc_port_fixup_legacy_flag(dig_port, mask);

	return mask;
}

static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	if (IS_ALDERLAKE_P(i915))
		return adl_tc_port_live_status_mask(dig_port);

	return icl_tc_port_live_status_mask(dig_port);
}

/*
 * Return the PHY status complete flag indicating that display can acquire the
 * PHY ownership. The IOM firmware sets this flag when a DP-alt or legacy sink
 * is connected and it's ready to switch the ownership to display. The flag
 * will be left cleared when a TBT-alt sink is connected, where the PHY is
 * owned by the TBT subsystem and so switching the ownership to display is not
 * required.
 */
static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assuming not complete\n",
			    dig_port->tc_port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx);
}

/*
 * Return the PHY status complete flag indicating that display can acquire the
 * PHY ownership. The IOM firmware sets this flag when it's ready to switch
 * the ownership to display, regardless of what sink is connected (TBT-alt,
 * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT
 * subsystem and so switching the ownership to display is not required.
 */
static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assuming not complete\n",
			    dig_port->tc_port_name);
		return false;
	}

	return val & TCSS_DDI_STATUS_READY;
}

static bool tc_phy_status_complete(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	if (IS_ALDERLAKE_P(i915))
		return adl_tc_phy_status_complete(dig_port);

	return icl_tc_phy_status_complete(dig_port);
}

static bool icl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
				      bool take)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, can't %s ownership\n",
			    dig_port->tc_port_name, take ? "take" : "release");

		return false;
	}

	val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
	if (take)
		val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);

	intel_uncore_write(uncore,
			   PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);

	return true;
}

static bool adl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
				      bool take)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum port port = dig_port->base.port;
	u32 val;

	val = intel_uncore_read(uncore, DDI_BUF_CTL(port));
	if (take)
		val |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
	else
		val &= ~DDI_BUF_CTL_TC_PHY_OWNERSHIP;
	intel_uncore_write(uncore, DDI_BUF_CTL(port), val);

	return true;
}

static bool tc_phy_take_ownership(struct intel_digital_port *dig_port, bool take)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	if (IS_ALDERLAKE_P(i915))
		return adl_tc_phy_take_ownership(dig_port, take);

	return icl_tc_phy_take_ownership(dig_port, take);
}

static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assume safe mode\n",
			    dig_port->tc_port_name);
		return true;
	}

	return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
}

static bool adl_tc_phy_is_owned(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum port port = dig_port->base.port;
	u32 val;

	val = intel_uncore_read(uncore, DDI_BUF_CTL(port));
	return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}

static bool tc_phy_is_owned(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	if (IS_ALDERLAKE_P(i915))
		return adl_tc_phy_is_owned(dig_port);

	return icl_tc_phy_is_owned(dig_port);
}

/*
 * This function implements the first part of the Connect Flow described by our
 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
 * lanes, EDID, etc) is done as needed in the typical places.
 *
 * Unlike the other ports, type-C ports are not available to use as soon as we
 * get a hotplug. The type-C PHYs can be shared between multiple controllers:
 * display, USB, etc. As a result, handshaking through FIA is required around
 * connect and disconnect to cleanly transfer ownership with the controller and
 * set the type-C power state.
 */
static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
			       int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	u32 live_status_mask;
	int max_lanes;

	if (!tc_phy_status_complete(dig_port)) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
			    dig_port->tc_port_name);
		goto out_set_tbt_alt_mode;
	}

	live_status_mask = tc_port_live_status_mask(dig_port);
	if (!(live_status_mask & (BIT(TC_PORT_DP_ALT) | BIT(TC_PORT_LEGACY)))) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY ownership not required (live status %02x)\n",
			    dig_port->tc_port_name, live_status_mask);
		goto out_set_tbt_alt_mode;
	}

	if (!tc_phy_take_ownership(dig_port, true) &&
	    !drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port))
		goto out_set_tbt_alt_mode;

	max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
	if (dig_port->tc_legacy_port) {
		drm_WARN_ON(&i915->drm, max_lanes != 4);
		dig_port->tc_mode = TC_PORT_LEGACY;

		return;
	}

	/*
	 * Now we have to re-check the live state, in case the port recently
	 * became disconnected. Not necessary for legacy mode.
	 */
	if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
			    dig_port->tc_port_name);
		goto out_release_phy;
	}

	if (max_lanes < required_lanes) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY max lanes %d < required lanes %d\n",
			    dig_port->tc_port_name,
			    max_lanes, required_lanes);
		goto out_release_phy;
	}

	dig_port->tc_mode = TC_PORT_DP_ALT;

	return;

out_release_phy:
	tc_phy_take_ownership(dig_port, false);
out_set_tbt_alt_mode:
	dig_port->tc_mode = TC_PORT_TBT_ALT;
}

/*
 * See the comment at the connect function. This implements the Disconnect
 * Flow.
 */
static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
{
	switch (dig_port->tc_mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		tc_phy_take_ownership(dig_port, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		dig_port->tc_mode = TC_PORT_DISCONNECTED;
		fallthrough;
	case TC_PORT_DISCONNECTED:
		break;
	default:
		MISSING_CASE(dig_port->tc_mode);
	}
}

static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	if (!tc_phy_status_complete(dig_port)) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n",
			    dig_port->tc_port_name);
		return dig_port->tc_mode == TC_PORT_TBT_ALT;
	}

	/* On ADL-P the PHY complete flag is set in TBT mode as well. */
	if (IS_ALDERLAKE_P(i915) && dig_port->tc_mode == TC_PORT_TBT_ALT)
		return true;

	if (!tc_phy_is_owned(dig_port)) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY not owned\n",
			    dig_port->tc_port_name);

		return false;
	}

	return dig_port->tc_mode == TC_PORT_DP_ALT ||
	       dig_port->tc_mode == TC_PORT_LEGACY;
}

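/*
 * Determine the port's current mode from the HW state: TBT-alt if the PHY
 * is not owned by display, otherwise legacy or DP-alt based on the live
 * status and the port's legacy flag.
 */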
static enum tc_port_mode
intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	u32 live_status_mask = tc_port_live_status_mask(dig_port);
	enum tc_port_mode mode;

	if (!tc_phy_is_owned(dig_port) ||
	    drm_WARN_ON(&i915->drm, !tc_phy_status_complete(dig_port)))
		return TC_PORT_TBT_ALT;

	mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
	if (live_status_mask) {
		enum tc_port_mode live_mode = fls(live_status_mask) - 1;

		if (!drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT))
			mode = live_mode;
	}

	return mode;
}

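/*
 * Determine the mode the port should be switched to: the connected sink's
 * mode if there is a live status, TBT-alt otherwise.
 */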
static enum tc_port_mode
intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
{
	u32 live_status_mask = tc_port_live_status_mask(dig_port);

	if (live_status_mask)
		return fls(live_status_mask) - 1;

	return TC_PORT_TBT_ALT;
}

static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
				     int required_lanes, bool force_disconnect)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port_mode old_tc_mode = dig_port->tc_mode;

	intel_display_power_flush_work(i915);
	if (!intel_tc_cold_requires_aux_pw(dig_port)) {
		enum intel_display_power_domain aux_domain;
		bool aux_powered;

		aux_domain = intel_aux_power_domain(dig_port);
		aux_powered = intel_display_power_is_enabled(i915, aux_domain);
		drm_WARN_ON(&i915->drm, aux_powered);
	}

	icl_tc_phy_disconnect(dig_port);
	if (!force_disconnect)
		icl_tc_phy_connect(dig_port, required_lanes);

	drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
		    dig_port->tc_port_name,
		    tc_port_mode_name(old_tc_mode),
		    tc_port_mode_name(dig_port->tc_mode));
}

static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
{
	return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode;
}

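/*
 * Switch the port to the mode matching the live status if that differs
 * from the current mode (or force a disconnect), re-acquiring the TC-cold
 * blocking power domain to match the new mode.
 */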
static void intel_tc_port_update_mode(struct intel_digital_port *dig_port,
				      int required_lanes, bool force_disconnect)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t wref;
	bool needs_reset = force_disconnect;

	if (!needs_reset) {
		/* Get power domain required to check the hotplug live status. */
		wref = tc_cold_block(dig_port, &domain);
		needs_reset = intel_tc_port_needs_reset(dig_port);
		tc_cold_unblock(dig_port, domain, wref);
	}

	if (!needs_reset)
		return;

	/* Get power domain required for resetting the mode. */
	wref = tc_cold_block_in_mode(dig_port, TC_PORT_DISCONNECTED, &domain);

	intel_tc_port_reset_mode(dig_port, required_lanes, force_disconnect);

	/* Get power domain matching the new mode after reset. */
	tc_cold_unblock(dig_port, dig_port->tc_lock_power_domain,
			fetch_and_zero(&dig_port->tc_lock_wakeref));
	if (dig_port->tc_mode != TC_PORT_DISCONNECTED)
		dig_port->tc_lock_wakeref = tc_cold_block(dig_port,
							  &dig_port->tc_lock_power_domain);

	tc_cold_unblock(dig_port, domain, wref);
}

static void
intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
				 int refcount)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount);
	dig_port->tc_link_refcount = refcount;
}

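/*
 * Sync the port's SW state (TC mode, link refcount, TC-cold blocking) with
 * the current HW state: keep the PHY connected if there are active links,
 * otherwise disconnect it.
 */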
void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_encoder *encoder = &dig_port->base;
	intel_wakeref_t tc_cold_wref;
	enum intel_display_power_domain domain;
	int active_links = 0;

	mutex_lock(&dig_port->tc_lock);

	if (dig_port->dp.is_mst)
		active_links = intel_dp_mst_encoder_active_links(dig_port);
	else if (encoder->base.crtc)
		active_links = to_intel_crtc(encoder->base.crtc)->active;

	drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_DISCONNECTED);
	drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);

	tc_cold_wref = tc_cold_block(dig_port, &domain);

	dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
	if (active_links) {
		if (!icl_tc_phy_is_connected(dig_port))
			drm_dbg_kms(&i915->drm,
				    "Port %s: PHY disconnected with %d active link(s)\n",
				    dig_port->tc_port_name, active_links);
		intel_tc_port_link_init_refcount(dig_port, active_links);

		dig_port->tc_lock_wakeref = tc_cold_block(dig_port,
							  &dig_port->tc_lock_power_domain);
	} else {
		/*
		 * TBT-alt is the default mode whenever the PHY ownership is
		 * not held (regardless of the sink's connected live state),
		 * so we can simply switch from it to disconnected mode here
		 * without a note.
		 */
		if (dig_port->tc_mode != TC_PORT_TBT_ALT)
			drm_dbg_kms(&i915->drm,
				    "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
				    dig_port->tc_port_name,
				    tc_port_mode_name(dig_port->tc_mode));
		icl_tc_phy_disconnect(dig_port);
	}

	tc_cold_unblock(dig_port, domain, tc_cold_wref);

	drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
		    dig_port->tc_port_name,
		    tc_port_mode_name(dig_port->tc_mode));

	mutex_unlock(&dig_port->tc_lock);
}

/*
 * The type-C ports are different because even when they are connected, they may
 * not be available/usable by the graphics driver: see the comment on
 * icl_tc_phy_connect(). So in our driver instead of adding the additional
 * concept of "usable" and make everything check for "connected and usable" we
 * define a port as "connected" when it is not only connected, but also when it
 * is usable by the rest of the driver. That maintains the old assumption that
 * connected ports are usable, and avoids exposing to the users objects they
 * can't really use.
 */
bool intel_tc_port_connected(struct intel_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	bool is_connected;

	intel_tc_port_lock(dig_port);

	is_connected = tc_port_live_status_mask(dig_port) &
		       BIT(dig_port->tc_mode);

	intel_tc_port_unlock(dig_port);

	return is_connected;
}

static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
				 int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	mutex_lock(&dig_port->tc_lock);

	cancel_delayed_work(&dig_port->tc_disconnect_phy_work);

	if (!dig_port->tc_link_refcount)
		intel_tc_port_update_mode(dig_port, required_lanes,
					  false);

	drm_WARN_ON(&i915->drm, dig_port->tc_mode == TC_PORT_DISCONNECTED);
	drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_TBT_ALT &&
				!tc_phy_is_owned(dig_port));
}

void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
	__intel_tc_port_lock(dig_port, 1);
}

/**
 * intel_tc_port_disconnect_phy_work: disconnect TypeC PHY from display port
 * @dig_port: digital port
 *
 * Disconnect the given digital port from its TypeC PHY (handing back the
 * control of the PHY to the TypeC subsystem). This will happen in a delayed
 * manner after each AUX transaction and modeset disable.
 */
static void intel_tc_port_disconnect_phy_work(struct work_struct *work)
{
	struct intel_digital_port *dig_port =
		container_of(work, struct intel_digital_port, tc_disconnect_phy_work.work);

	mutex_lock(&dig_port->tc_lock);

	if (!dig_port->tc_link_refcount)
		intel_tc_port_update_mode(dig_port, 1, true);

	mutex_unlock(&dig_port->tc_lock);
}

/**
 * intel_tc_port_flush_work: flush the work disconnecting the PHY
 * @dig_port: digital port
 *
 * Flush the delayed work disconnecting an idle PHY.
 */
void intel_tc_port_flush_work(struct intel_digital_port *dig_port)
{
	flush_delayed_work(&dig_port->tc_disconnect_phy_work);
}

void intel_tc_port_unlock(struct intel_digital_port *dig_port)
{
	if (!dig_port->tc_link_refcount && dig_port->tc_mode != TC_PORT_DISCONNECTED)
		queue_delayed_work(system_unbound_wq, &dig_port->tc_disconnect_phy_work,
				   msecs_to_jiffies(1000));

	mutex_unlock(&dig_port->tc_lock);
}

bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
{
	return mutex_is_locked(&dig_port->tc_lock) ||
	       dig_port->tc_link_refcount;
}

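/*
 * Grab a link reference, keeping the port's TC mode locked (and the PHY
 * connected) until the matching intel_tc_port_put_link() call.
 */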
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
			    int required_lanes)
{
	__intel_tc_port_lock(dig_port, required_lanes);
	dig_port->tc_link_refcount++;
	intel_tc_port_unlock(dig_port);
}

void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
	intel_tc_port_lock(dig_port);
	--dig_port->tc_link_refcount;
	intel_tc_port_unlock(dig_port);

	/*
	 * Disconnecting the PHY after the PHY's PLL gets disabled may
	 * hang the system on ADL-P, so disconnect the PHY here synchronously.
	 * TODO: remove this once the root cause of the ordering requirement
	 * is found/fixed.
	 */
	intel_tc_port_flush_work(dig_port);
}

static bool
tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;
	u32 val;

	if (!INTEL_INFO(i915)->display.has_modular_fia)
		return false;

	mutex_lock(&dig_port->tc_lock);
	wakeref = tc_cold_block(dig_port, &domain);
	val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1));
	tc_cold_unblock(dig_port, domain, wakeref);
	mutex_unlock(&dig_port->tc_lock);

	drm_WARN_ON(&i915->drm, val == 0xffffffff);

	return val & MODULAR_FIA_MASK;
}

static void
tc_port_load_fia_params(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
{
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_port_to_tc(i915, port);

	/*
	 * Each Modular FIA instance houses 2 TC ports. In SOC that has more
	 * than two TC ports, there are multiple instances of Modular FIA.
	 */
	if (tc_has_modular_fia(i915, dig_port)) {
		dig_port->tc_phy_fia = tc_port / 2;
		dig_port->tc_phy_fia_idx = tc_port % 2;
	} else {
		dig_port->tc_phy_fia = FIA1;
		dig_port->tc_phy_fia_idx = tc_port;
	}
}

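/*
 * Initialize the TC port's SW state. @is_legacy is the static (VBT) legacy
 * port flag, which may be fixed up later based on the live status, see
 * tc_port_fixup_legacy_flag().
 */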
void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_port_to_tc(i915, port);

	if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
		return;

	snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
		 "%c/TC#%d", port_name(port), tc_port + 1);

	mutex_init(&dig_port->tc_lock);
	INIT_DELAYED_WORK(&dig_port->tc_disconnect_phy_work, intel_tc_port_disconnect_phy_work);
	dig_port->tc_legacy_port = is_legacy;
	dig_port->tc_mode = TC_PORT_DISCONNECTED;
	dig_port->tc_link_refcount = 0;
	tc_port_load_fia_params(i915, dig_port);
}