intel_tc.c 18.6 KB
Newer Older
1 2 3 4 5
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

6
#include "i915_drv.h"
7
#include "intel_display.h"
8
#include "intel_display_types.h"
9
#include "intel_dp_mst.h"
10 11
#include "intel_tc.h"

12
static const char *tc_port_mode_name(enum tc_port_mode mode)
13 14
{
	static const char * const names[] = {
15 16
		[TC_PORT_TBT_ALT] = "tbt-alt",
		[TC_PORT_DP_ALT] = "dp-alt",
17 18 19
		[TC_PORT_LEGACY] = "legacy",
	};

20 21
	if (WARN_ON(mode >= ARRAY_SIZE(names)))
		mode = TC_PORT_TBT_ALT;
22

23
	return names[mode];
24 25
}

26 27 28 29 30 31 32 33 34 35 36
/*
 * Return the power domain that blocks the TC-cold state for this port:
 * the legacy AUX domain on gen11, the dedicated TC_COLD_OFF domain later.
 */
static enum intel_display_power_domain
tc_cold_get_power_domain(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	return INTEL_GEN(i915) == 11 ?
		intel_legacy_aux_to_power_domain(dig_port->aux_ch) :
		POWER_DOMAIN_TC_COLD_OFF;
}

37 38 39 40 41 42
/*
 * Take a power reference preventing TC-cold.  Returns 0 when blocking is
 * not needed (non-legacy port on gen11); pair with tc_cold_unblock().
 */
static intel_wakeref_t
tc_cold_block(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	if (INTEL_GEN(i915) == 11 && !dig_port->tc_legacy_port)
		return 0;

	return intel_display_power_get(i915,
				       tc_cold_get_power_domain(dig_port));
}

/* Release the TC-cold blocking reference taken by tc_cold_block(). */
static void
tc_cold_unblock(struct intel_digital_port *dig_port, intel_wakeref_t wakeref)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	/*
	 * A wakeref of 0 means tc_cold_block() skipped taking a reference
	 * (non-legacy port on gen11), so there is nothing to drop.  A value
	 * of -1 means saving the depot stack failed while blocking, but the
	 * power reference itself was taken and must still be released.
	 */
	if (wakeref == 0)
		return;

	intel_display_power_put_async(i915,
				      tc_cold_get_power_domain(dig_port),
				      wakeref);
}

68 69 70 71 72 73 74 75 76 77 78 79 80 81
static void
assert_tc_cold_blocked(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	bool enabled;

	if (INTEL_GEN(i915) == 11 && !dig_port->tc_legacy_port)
		return;

	enabled = intel_display_power_is_enabled(i915,
						 tc_cold_get_power_domain(dig_port));
	drm_WARN_ON(&i915->drm, !enabled);
}

82
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
83
{
84 85
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
86 87
	u32 lane_mask;

A
Anusha Srivatsa 已提交
88 89
	lane_mask = intel_uncore_read(uncore,
				      PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
90

91
	drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
92
	assert_tc_cold_blocked(dig_port);
93

94 95
	lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
	return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
96 97
}

98 99 100 101 102 103 104 105 106
/* Return this port's pin assignment field from the FIA DFLEXPA1 register. */
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	u32 pin_mask;

	pin_mask = intel_uncore_read(&i915->uncore,
				     PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));

	/* An all-ones readback indicates the FIA was in TC-cold. */
	drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
	assert_tc_cold_blocked(dig_port);

	pin_mask &= DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);

	return pin_mask >> DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
}

114 115
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
{
116
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
117
	intel_wakeref_t wakeref;
118
	u32 lane_mask;
119

120
	if (dig_port->tc_mode != TC_PORT_DP_ALT)
121 122
		return 4;

123 124
	assert_tc_cold_blocked(dig_port);

125
	lane_mask = 0;
126
	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
127
		lane_mask = intel_tc_port_get_lane_mask(dig_port);
128

129
	switch (lane_mask) {
130
	default:
131
		MISSING_CASE(lane_mask);
132
		fallthrough;
133 134 135 136
	case 0x1:
	case 0x2:
	case 0x4:
	case 0x8:
137
		return 1;
138 139
	case 0x3:
	case 0xc:
140
		return 2;
141
	case 0xf:
142 143 144 145
		return 4;
	}
}

146 147 148 149 150 151 152 153
/*
 * Program the FIA lane-enable field (DFLEXDPMLE1) for this port, selecting
 * which main-link lanes are active based on the required lane count and the
 * lane reversal strapping.
 */
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
				      int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	/* Lane reversal is only expected on legacy-mode ports. */
	drm_WARN_ON(&i915->drm,
		    lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);

	assert_tc_cold_blocked(dig_port);

	/* Read-modify-write only this port's field within the register. */
	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
	val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);

	switch (required_lanes) {
	case 1:
		/* Reversed ports enable the top lane(s) instead of the bottom. */
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3(dig_port->tc_phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML0(dig_port->tc_phy_fia_idx);
		break;
	case 2:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3_2(dig_port->tc_phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML1_0(dig_port->tc_phy_fia_idx);
		break;
	case 4:
		val |= DFLEXDPMLE1_DPMLETC_ML3_0(dig_port->tc_phy_fia_idx);
		break;
	default:
		MISSING_CASE(required_lanes);
	}

	intel_uncore_write(uncore,
			   PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val);
}

185 186 187
/*
 * Flip the VBT-derived legacy-port flag when the observed live status
 * contradicts it.
 */
static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
				      u32 live_status_mask)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	u32 valid_hpd_mask;

	/* The modes a port may legitimately report live status for. */
	valid_hpd_mask = dig_port->tc_legacy_port ?
		BIT(TC_PORT_LEGACY) :
		BIT(TC_PORT_DP_ALT) | BIT(TC_PORT_TBT_ALT);

	if ((live_status_mask & ~valid_hpd_mask) == 0)
		return;

	/* If live status mismatches the VBT flag, trust the live status. */
	drm_dbg_kms(&i915->drm,
		    "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
		    dig_port->tc_port_name, live_status_mask, valid_hpd_mask);

	dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
}

static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
210 211
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
212
	u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin];
213 214 215
	u32 mask = 0;
	u32 val;

A
Anusha Srivatsa 已提交
216 217
	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
218

219
	if (val == 0xffffffff) {
220 221 222
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, nothing connected\n",
			    dig_port->tc_port_name);
223 224 225
		return mask;
	}

226
	if (val & TC_LIVE_STATE_TBT(dig_port->tc_phy_fia_idx))
227
		mask |= BIT(TC_PORT_TBT_ALT);
228
	if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx))
229 230
		mask |= BIT(TC_PORT_DP_ALT);

231
	if (intel_uncore_read(uncore, SDEISR) & isr_bit)
232 233 234
		mask |= BIT(TC_PORT_LEGACY);

	/* The sink can be connected only in a single mode. */
235
	if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1))
236 237 238 239 240 241 242
		tc_port_fixup_legacy_flag(dig_port, mask);

	return mask;
}

static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
243 244
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
245 246
	u32 val;

A
Anusha Srivatsa 已提交
247 248
	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
249
	if (val == 0xffffffff) {
250 251 252
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assuming not complete\n",
			    dig_port->tc_port_name);
253 254
		return false;
	}
255

256
	return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx);
257 258
}

259
/*
 * Switch the PHY's safe-mode state in the FIA.  Clearing safe mode
 * (enable == false) hands PHY ownership to the display controller.
 * Returns false if the register was unreadable (PHY in TC-cold).
 */
static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
				     bool enable)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
			    dig_port->tc_port_name, enableddisabled(enable));

		return false;
	}

	/* NOT_SAFE set means the display owns the PHY; clear it for safe mode. */
	if (enable)
		val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
	else
		val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);

	intel_uncore_write(uncore,
			   PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);

	/* Entering safe mode should make the status-complete bit drop. */
	if (enable && wait_for(!icl_tc_phy_status_complete(dig_port), 10))
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY complete clear timed out\n",
			    dig_port->tc_port_name);

	return true;
}

291 292
static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
{
293 294
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
295 296
	u32 val;

A
Anusha Srivatsa 已提交
297 298
	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
299
	if (val == 0xffffffff) {
300 301 302
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assume safe mode\n",
			    dig_port->tc_port_name);
303 304 305
		return true;
	}

306
	return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx));
307 308
}

309 310 311 312 313 314 315 316 317 318 319
/*
 * This function implements the first part of the Connect Flow described by our
 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
 * lanes, EDID, etc) is done as needed in the typical places.
 *
 * Unlike the other ports, type-C ports are not available to use as soon as we
 * get a hotplug. The type-C PHYs can be shared between multiple controllers:
 * display, USB, etc. As a result, handshaking through FIA is required around
 * connect and disconnect to cleanly transfer ownership with the controller and
 * set the type-C power state.
 */
320 321
static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
			       int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	int max_lanes;

	/* Without a completed PHY status only TBT-alt mode is usable. */
	if (!icl_tc_phy_status_complete(dig_port)) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
			    dig_port->tc_port_name);
		goto out_set_tbt_alt_mode;
	}

	/*
	 * Take PHY ownership by clearing safe mode.  If that fails on a
	 * non-legacy port, fall back to TBT-alt; a failure on a legacy
	 * port triggers the WARN but ownership is assumed anyway.
	 */
	if (!icl_tc_phy_set_safe_mode(dig_port, false) &&
	    !drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port))
		goto out_set_tbt_alt_mode;

	max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
	if (dig_port->tc_legacy_port) {
		/* Legacy ports are expected to expose all 4 lanes. */
		drm_WARN_ON(&i915->drm, max_lanes != 4);
		dig_port->tc_mode = TC_PORT_LEGACY;

		return;
	}

	/*
	 * Now we have to re-check the live state, in case the port recently
	 * became disconnected. Not necessary for legacy mode.
	 */
	if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
			    dig_port->tc_port_name);
		goto out_set_safe_mode;
	}

	if (max_lanes < required_lanes) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY max lanes %d < required lanes %d\n",
			    dig_port->tc_port_name,
			    max_lanes, required_lanes);
		goto out_set_safe_mode;
	}

	dig_port->tc_mode = TC_PORT_DP_ALT;

	return;

out_set_safe_mode:
	/* Hand ownership back before falling back to TBT-alt mode. */
	icl_tc_phy_set_safe_mode(dig_port, true);
out_set_tbt_alt_mode:
	dig_port->tc_mode = TC_PORT_TBT_ALT;
}

/*
 * See the comment at the connect function. This implements the Disconnect
 * Flow.
 */
376
static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
{
	switch (dig_port->tc_mode) {
	case TC_PORT_LEGACY:
		/* Nothing to do, we never disconnect from legacy mode */
		break;
	case TC_PORT_DP_ALT:
		/* Return PHY ownership to the FIA and fall back to TBT-alt. */
		icl_tc_phy_set_safe_mode(dig_port, true);
		dig_port->tc_mode = TC_PORT_TBT_ALT;
		break;
	case TC_PORT_TBT_ALT:
		/* Nothing to do, we stay in TBT-alt mode */
		break;
	default:
		MISSING_CASE(dig_port->tc_mode);
	}
}
393

394 395
/*
 * Report whether the PHY is connected in the sense required by the current
 * tc_mode: TBT-alt needs no PHY ownership, DP-alt/legacy require a complete,
 * non-safe-mode PHY.
 */
static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	if (!icl_tc_phy_status_complete(dig_port)) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n",
			    dig_port->tc_port_name);
		/* Only TBT-alt mode works with an incomplete PHY status. */
		return dig_port->tc_mode == TC_PORT_TBT_ALT;
	}

	if (icl_tc_phy_is_in_safe_mode(dig_port)) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY still in safe mode\n",
			    dig_port->tc_port_name);

		return false;
	}

	return dig_port->tc_mode == TC_PORT_DP_ALT ||
	       dig_port->tc_mode == TC_PORT_LEGACY;
}

/*
 * Derive the port's current mode from the observed PHY/live state, used
 * when taking over the hardware state (see intel_tc_port_sanitize()).
 */
static enum tc_port_mode
intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	u32 live_status_mask = tc_port_live_status_mask(dig_port);
	bool in_safe_mode = icl_tc_phy_is_in_safe_mode(dig_port);
	enum tc_port_mode mode;

	/* A PHY in safe mode (or with incomplete status) can only be TBT-alt. */
	if (in_safe_mode ||
	    drm_WARN_ON(&i915->drm, !icl_tc_phy_status_complete(dig_port)))
		return TC_PORT_TBT_ALT;

	mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
	if (live_status_mask) {
		/* fls() - 1 maps the highest set mask bit to a mode value. */
		enum tc_port_mode live_mode = fls(live_status_mask) - 1;

		/* A TBT-alt live status would contradict the owned PHY state. */
		if (!drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT))
			mode = live_mode;
	}

	return mode;
}

438 439 440 441 442 443 444 445 446 447 448
/* Determine the mode the port should be switched to on the next reset. */
static enum tc_port_mode
intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
{
	u32 live_status_mask = tc_port_live_status_mask(dig_port);

	/* The highest set live-status bit selects the mode. */
	if (live_status_mask)
		return fls(live_status_mask) - 1;

	/* No sink detected: legacy ports with a ready PHY stay in legacy mode. */
	if (icl_tc_phy_status_complete(dig_port) && dig_port->tc_legacy_port)
		return TC_PORT_LEGACY;

	return TC_PORT_TBT_ALT;
}

451 452
/* Disconnect the PHY and reconnect it in the current target mode. */
static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
				     int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port_mode old_tc_mode = dig_port->tc_mode;

	/* Flush pending async power-puts before checking the AUX state below. */
	intel_display_power_flush_work(i915);
	if (INTEL_GEN(i915) != 11 || !dig_port->tc_legacy_port) {
		enum intel_display_power_domain aux_domain;
		bool aux_powered;

		/* The port's AUX power is expected to be off during a reset. */
		aux_domain = intel_aux_power_domain(dig_port);
		aux_powered = intel_display_power_is_enabled(i915, aux_domain);
		drm_WARN_ON(&i915->drm, aux_powered);
	}

	icl_tc_phy_disconnect(dig_port);
	icl_tc_phy_connect(dig_port, required_lanes);

	drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
		    dig_port->tc_port_name,
		    tc_port_mode_name(old_tc_mode),
		    tc_port_mode_name(dig_port->tc_mode));
}
475

476 477 478 479
/* Initialize the link refcount from the pre-existing (takeover) link count. */
static void
intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
				 int refcount)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	/* The refcount must only be initialized once, from zero. */
	drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount);
	dig_port->tc_link_refcount = refcount;
}

486 487
/*
 * Synchronize the driver's TC port state with what the hardware was left in
 * (e.g. by firmware): read back the current mode, account for any already
 * active links, and connect legacy ports that are otherwise idle.
 */
void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_encoder *encoder = &dig_port->base;
	intel_wakeref_t tc_cold_wref;
	int active_links = 0;

	mutex_lock(&dig_port->tc_lock);
	tc_cold_wref = tc_cold_block(dig_port);

	dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
	if (dig_port->dp.is_mst)
		active_links = intel_dp_mst_encoder_active_links(dig_port);
	else if (encoder->base.crtc)
		active_links = to_intel_crtc(encoder->base.crtc)->active;

	if (active_links) {
		/* Links already active: keep the mode, just seed the refcount. */
		if (!icl_tc_phy_is_connected(dig_port))
			drm_dbg_kms(&i915->drm,
				    "Port %s: PHY disconnected with %d active link(s)\n",
				    dig_port->tc_port_name, active_links);
		intel_tc_port_link_init_refcount(dig_port, active_links);

		goto out;
	}

	/* Idle legacy ports still get connected (with the minimum lane count). */
	if (dig_port->tc_legacy_port)
		icl_tc_phy_connect(dig_port, 1);

out:
	drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
		    dig_port->tc_port_name,
		    tc_port_mode_name(dig_port->tc_mode));

	tc_cold_unblock(dig_port, tc_cold_wref);
	mutex_unlock(&dig_port->tc_lock);
}

524 525 526
/* A mode reset is needed when the target mode differs from the current one. */
static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
{
	return dig_port->tc_mode != intel_tc_port_get_target_mode(dig_port);
}

/*
 * The type-C ports are different because even when they are connected, they may
 * not be available/usable by the graphics driver: see the comment on
 * icl_tc_phy_connect(). So in our driver instead of adding the additional
 * concept of "usable" and make everything check for "connected and usable" we
 * define a port as "connected" when it is not only connected, but also when it
 * is usable by the rest of the driver. That maintains the old assumption that
 * connected ports are usable, and avoids exposing to the users objects they
 * can't really use.
 */
539
bool intel_tc_port_connected(struct intel_encoder *encoder)
540
{
541
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
542
	bool is_connected;
543
	intel_wakeref_t tc_cold_wref;
544

545
	intel_tc_port_lock(dig_port);
546 547
	tc_cold_wref = tc_cold_block(dig_port);

548 549
	is_connected = tc_port_live_status_mask(dig_port) &
		       BIT(dig_port->tc_mode);
550 551

	tc_cold_unblock(dig_port, tc_cold_wref);
552
	intel_tc_port_unlock(dig_port);
553 554 555 556

	return is_connected;
}

557 558
/*
 * Lock the port and, when no link reference pins the current mode, resolve
 * the port mode (resetting it if needed) for the given lane requirement.
 */
static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
				 int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get(i915, POWER_DOMAIN_DISPLAY_CORE);

	mutex_lock(&dig_port->tc_lock);

	/* The mode may only be changed while no link holds a reference. */
	if (!dig_port->tc_link_refcount) {
		intel_wakeref_t tc_cold_wref;

		tc_cold_wref = tc_cold_block(dig_port);

		if (intel_tc_port_needs_reset(dig_port))
			intel_tc_port_reset_mode(dig_port, required_lanes);

		tc_cold_unblock(dig_port, tc_cold_wref);
	}

	drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
	/* Stashed here and released by intel_tc_port_unlock(). */
	dig_port->tc_lock_wakeref = wakeref;
}

/* Lock the port with the minimum (1 lane) requirement. */
void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
	__intel_tc_port_lock(dig_port, 1);
}

void intel_tc_port_unlock(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref = fetch_and_zero(&dig_port->tc_lock_wakeref);

	mutex_unlock(&dig_port->tc_lock);

	/* Release the display-core reference taken in __intel_tc_port_lock(). */
	intel_display_power_put_async(i915, POWER_DOMAIN_DISPLAY_CORE,
				      wakeref);
}

598 599 600 601 602 603
/* True while the port is locked or a link reference is held. */
bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
{
	return dig_port->tc_link_refcount ||
	       mutex_is_locked(&dig_port->tc_lock);
}

604 605 606 607 608 609 610 611 612 613 614 615
/* Take a link reference, pinning the port mode resolved while locking. */
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
			    int required_lanes)
{
	__intel_tc_port_lock(dig_port, required_lanes);
	dig_port->tc_link_refcount++;
	intel_tc_port_unlock(dig_port);
}

/* Drop a link reference taken by intel_tc_port_get_link(). */
void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
	mutex_lock(&dig_port->tc_lock);
	dig_port->tc_link_refcount--;
	mutex_unlock(&dig_port->tc_lock);
}

619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655
/* Detect at runtime whether the SOC uses Modular FIA instances. */
static bool
tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
{
	intel_wakeref_t wakeref;
	u32 val;

	if (!INTEL_INFO(i915)->display.has_modular_fia)
		return false;

	/* The FIA register is only readable while TC-cold is blocked. */
	wakeref = tc_cold_block(dig_port);
	val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1));
	tc_cold_unblock(dig_port, wakeref);

	drm_WARN_ON(&i915->drm, val == 0xffffffff);

	return val & MODULAR_FIA_MASK;
}

/* Cache which FIA instance and per-instance index serve this TC port. */
static void
tc_port_load_fia_params(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
{
	enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);

	/*
	 * Each Modular FIA instance houses 2 TC ports. In SOC that has more
	 * than two TC ports, there are multiple instances of Modular FIA.
	 */
	if (tc_has_modular_fia(i915, dig_port)) {
		dig_port->tc_phy_fia = tc_port / 2;
		dig_port->tc_phy_fia_idx = tc_port % 2;
	} else {
		dig_port->tc_phy_fia = FIA1;
		dig_port->tc_phy_fia_idx = tc_port;
	}
}

656 657 658 659 660 661
/* One-time per-port setup of the TC state: name, lock, legacy flag, FIA. */
void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_port_to_tc(i915, port);

	if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
		return;

	/* Name used in this file's debug messages, e.g. "C/TC#1". */
	snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
		 "%c/TC#%d", port_name(port), tc_port + 1);

	mutex_init(&dig_port->tc_lock);
	dig_port->tc_legacy_port = is_legacy;
	dig_port->tc_link_refcount = 0;
	tc_port_load_fia_params(i915, dig_port);
}