// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_dp_mst.h"
#include "intel_tc.h"

static const char *tc_port_mode_name(enum tc_port_mode mode)
{
	static const char * const names[] = {
		[TC_PORT_TBT_ALT] = "tbt-alt",
		[TC_PORT_DP_ALT] = "dp-alt",
		[TC_PORT_LEGACY] = "legacy",
	};

	if (WARN_ON(mode >= ARRAY_SIZE(names)))
		mode = TC_PORT_TBT_ALT;

	return names[mode];
}

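/*
 * Cache which FIA instance services this TC port and the port's index within
 * that instance. With a modular FIA each instance handles two TC ports
 * (tc_port / 2 selects the instance, tc_port % 2 the index within it);
 * otherwise a single FIA (FIA1) services all TC ports directly.
 */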
static void
tc_port_load_fia_params(struct drm_i915_private *i915,
			struct intel_digital_port *dig_port)
{
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_port_to_tc(i915, port);
	u32 modular_fia;

	if (INTEL_INFO(i915)->display.has_modular_fia) {
		modular_fia = intel_uncore_read(&i915->uncore,
						PORT_TX_DFLEXDPSP(FIA1));
		modular_fia &= MODULAR_FIA_MASK;
	} else {
		modular_fia = 0;
	}

	/*
	 * Each Modular FIA instance houses 2 TC ports. In SoCs with more than
	 * two TC ports, there are multiple instances of Modular FIA.
	 */
	if (modular_fia) {
		dig_port->tc_phy_fia = tc_port / 2;
		dig_port->tc_phy_fia_idx = tc_port % 2;
	} else {
		dig_port->tc_phy_fia = FIA1;
		dig_port->tc_phy_fia_idx = tc_port;
	}
}

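/*
 * Return the DP lane assignment reported by the FIA for this port. A raw
 * readout of 0xffffffff means the PHY is powered down (TCCOLD) and is
 * flagged with a WARN below.
 */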
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 lane_mask;

	lane_mask = intel_uncore_read(uncore,
				      PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));

	WARN_ON(lane_mask == 0xffffffff);

	lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
	return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
}

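/*
 * Return the maximum lane count usable in the current port mode: 4 unless the
 * port is in DP-alt mode, in which case the count is derived from the live
 * lane assignment in the FIA.
 */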
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref;
	u32 lane_mask;

	if (dig_port->tc_mode != TC_PORT_DP_ALT)
		return 4;

	lane_mask = 0;
	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		lane_mask = intel_tc_port_get_lane_mask(dig_port);

	switch (lane_mask) {
	default:
		MISSING_CASE(lane_mask);
		/* fall-through */
	case 0x1:
	case 0x2:
	case 0x4:
	case 0x8:
		return 1;
	case 0x3:
	case 0xc:
		return 2;
	case 0xf:
		return 4;
	}
}

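/*
 * Program the number of lanes the display engine should use into the FIA
 * (DFLEXDPMLE1), taking lane reversal into account. Lane reversal is only
 * expected in legacy mode, hence the WARN below.
 */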
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
				      int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	WARN_ON(lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
	val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);

	switch (required_lanes) {
	case 1:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3(dig_port->tc_phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML0(dig_port->tc_phy_fia_idx);
		break;
	case 2:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3_2(dig_port->tc_phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML1_0(dig_port->tc_phy_fia_idx);
		break;
	case 4:
		val |= DFLEXDPMLE1_DPMLETC_ML3_0(dig_port->tc_phy_fia_idx);
		break;
	default:
		MISSING_CASE(required_lanes);
	}

	intel_uncore_write(uncore,
			   PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val);
}

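/*
 * Reconcile the legacy-port flag coming from the VBT with the live status
 * read back from the hardware: if they disagree, trust the live status and
 * flip the flag.
 */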
static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
				      u32 live_status_mask)
{
	u32 valid_hpd_mask;

	if (dig_port->tc_legacy_port)
		valid_hpd_mask = BIT(TC_PORT_LEGACY);
	else
		valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
				 BIT(TC_PORT_TBT_ALT);

	if (!(live_status_mask & ~valid_hpd_mask))
		return;

	/* If live status mismatches the VBT flag, trust the live status. */
	DRM_ERROR("Port %s: live status %08x mismatches the legacy port flag, fix flag\n",
		  dig_port->tc_port_name, live_status_mask);

	dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
}

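/*
 * Return a mask of the TC port modes (TBT-alt, DP-alt, legacy) for which the
 * hardware reports a live connection. At most one bit is expected to be set.
 */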
static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
	struct intel_uncore *uncore = &i915->uncore;
	u32 mask = 0;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));

	if (val == 0xffffffff) {
		DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, nothing connected\n",
			      dig_port->tc_port_name);
		return mask;
	}

	if (val & TC_LIVE_STATE_TBT(dig_port->tc_phy_fia_idx))
		mask |= BIT(TC_PORT_TBT_ALT);
	if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx))
		mask |= BIT(TC_PORT_DP_ALT);

	if (intel_uncore_read(uncore, SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port))
		mask |= BIT(TC_PORT_LEGACY);

	/* The sink can be connected only in a single mode. */
	if (!WARN_ON(hweight32(mask) > 1))
		tc_port_fixup_legacy_flag(dig_port, mask);

	return mask;
}

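/*
 * Check whether the FIA reports the PHY mode status handshake as completed
 * for this port. A readout of 0xffffffff means the PHY is in TCCOLD.
 */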
static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
	if (val == 0xffffffff) {
		DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assuming not complete\n",
			      dig_port->tc_port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx);
}

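/*
 * Move the PHY into or out of safe mode via DFLEXDPCSSS. Returns false if the
 * PHY is in TCCOLD, in which case safe mode can't be changed.
 */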
static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
				     bool enable)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
	if (val == 0xffffffff) {
		DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
			      dig_port->tc_port_name,
			      enableddisabled(enable));

		return false;
	}

	val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
	if (!enable)
		val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);

	intel_uncore_write(uncore,
			   PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);

	if (enable && wait_for(!icl_tc_phy_status_complete(dig_port), 10))
		DRM_DEBUG_KMS("Port %s: PHY complete clear timed out\n",
			      dig_port->tc_port_name);

	return true;
}

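/*
 * Report whether the PHY is currently in safe mode. If the PHY is in TCCOLD
 * (the register reads back all ones), assume safe mode.
 */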
static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	u32 val;

	val = intel_uncore_read(uncore,
				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
	if (val == 0xffffffff) {
		DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assume safe mode\n",
			      dig_port->tc_port_name);
		return true;
	}

	return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx));
}

/*
 * This function implements the first part of the Connect Flow described by our
 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
 * lanes, EDID, etc) is done as needed in the typical places.
 *
 * Unlike the other ports, type-C ports are not available to use as soon as we
 * get a hotplug. The type-C PHYs can be shared between multiple controllers:
 * display, USB, etc. As a result, handshaking through FIA is required around
 * connect and disconnect to cleanly transfer ownership with the controller and
 * set the type-C power state.
 */
static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
			       int required_lanes)
{
	int max_lanes;

	if (!icl_tc_phy_status_complete(dig_port)) {
		DRM_DEBUG_KMS("Port %s: PHY not ready\n",
			      dig_port->tc_port_name);
		goto out_set_tbt_alt_mode;
	}

	if (!icl_tc_phy_set_safe_mode(dig_port, false) &&
	    !WARN_ON(dig_port->tc_legacy_port))
		goto out_set_tbt_alt_mode;

	max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
	if (dig_port->tc_legacy_port) {
		WARN_ON(max_lanes != 4);
		dig_port->tc_mode = TC_PORT_LEGACY;

		return;
	}

	/*
	 * Now we have to re-check the live state, in case the port recently
	 * became disconnected. Not necessary for legacy mode.
	 */
	if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
		DRM_DEBUG_KMS("Port %s: PHY sudden disconnect\n",
			      dig_port->tc_port_name);
		goto out_set_safe_mode;
	}

	if (max_lanes < required_lanes) {
		DRM_DEBUG_KMS("Port %s: PHY max lanes %d < required lanes %d\n",
			      dig_port->tc_port_name,
			      max_lanes, required_lanes);
		goto out_set_safe_mode;
	}

	dig_port->tc_mode = TC_PORT_DP_ALT;

	return;

out_set_safe_mode:
	icl_tc_phy_set_safe_mode(dig_port, true);
out_set_tbt_alt_mode:
	dig_port->tc_mode = TC_PORT_TBT_ALT;
}

/*
 * See the comment at the connect function. This implements the Disconnect
 * Flow.
 */
static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
{
	switch (dig_port->tc_mode) {
	case TC_PORT_LEGACY:
		/* Nothing to do, we never disconnect from legacy mode */
		break;
	case TC_PORT_DP_ALT:
		icl_tc_phy_set_safe_mode(dig_port, true);
		dig_port->tc_mode = TC_PORT_TBT_ALT;
		break;
	case TC_PORT_TBT_ALT:
		/* Nothing to do, we stay in TBT-alt mode */
		break;
	default:
		MISSING_CASE(dig_port->tc_mode);
	}
}

static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
{
	if (!icl_tc_phy_status_complete(dig_port)) {
		DRM_DEBUG_KMS("Port %s: PHY status not complete\n",
			      dig_port->tc_port_name);
		return dig_port->tc_mode == TC_PORT_TBT_ALT;
	}

	if (icl_tc_phy_is_in_safe_mode(dig_port)) {
		DRM_DEBUG_KMS("Port %s: PHY still in safe mode\n",
			      dig_port->tc_port_name);

		return false;
	}

	return dig_port->tc_mode == TC_PORT_DP_ALT ||
	       dig_port->tc_mode == TC_PORT_LEGACY;
}

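/*
 * Derive the TC port mode currently programmed in the hardware: safe mode or
 * an incomplete PHY status means TBT-alt, otherwise prefer the mode indicated
 * by the live status, falling back to the legacy/DP-alt choice implied by the
 * legacy-port flag.
 */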
static enum tc_port_mode
intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
{
	u32 live_status_mask = tc_port_live_status_mask(dig_port);
	bool in_safe_mode = icl_tc_phy_is_in_safe_mode(dig_port);
	enum tc_port_mode mode;

	if (in_safe_mode || WARN_ON(!icl_tc_phy_status_complete(dig_port)))
		return TC_PORT_TBT_ALT;

	mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
	if (live_status_mask) {
		enum tc_port_mode live_mode = fls(live_status_mask) - 1;

		if (!WARN_ON(live_mode == TC_PORT_TBT_ALT))
			mode = live_mode;
	}

	return mode;
}

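/*
 * Derive the mode the port should be switched to: the live status if there is
 * one, legacy if the PHY is ready on a legacy port, and TBT-alt otherwise.
 */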
static enum tc_port_mode
intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
{
	u32 live_status_mask = tc_port_live_status_mask(dig_port);

	if (live_status_mask)
		return fls(live_status_mask) - 1;

	return icl_tc_phy_status_complete(dig_port) &&
	       dig_port->tc_legacy_port ? TC_PORT_LEGACY :
					  TC_PORT_TBT_ALT;
}

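/*
 * Re-run the disconnect/connect sequence so the port mode matches the current
 * live status. The AUX power well must already be off at this point, hence
 * the power flush and the WARN below.
 */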
static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
				     int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum tc_port_mode old_tc_mode = dig_port->tc_mode;

	intel_display_power_flush_work(i915);
	WARN_ON(intel_display_power_is_enabled(i915,
					       intel_aux_power_domain(dig_port)));

	icl_tc_phy_disconnect(dig_port);
	icl_tc_phy_connect(dig_port, required_lanes);

	DRM_DEBUG_KMS("Port %s: TC port mode reset (%s -> %s)\n",
		      dig_port->tc_port_name,
		      tc_port_mode_name(old_tc_mode),
		      tc_port_mode_name(dig_port->tc_mode));
}

static void
intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
				 int refcount)
{
	WARN_ON(dig_port->tc_link_refcount);
	dig_port->tc_link_refcount = refcount;
}

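/*
 * Synchronize the software TC mode tracking with the state read back from the
 * hardware, taking over any links that are already active (for instance ones
 * left enabled by the BIOS) by initializing the link refcount accordingly.
 */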
void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
{
	struct intel_encoder *encoder = &dig_port->base;
	int active_links = 0;

	mutex_lock(&dig_port->tc_lock);

	dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
	if (dig_port->dp.is_mst)
		active_links = intel_dp_mst_encoder_active_links(dig_port);
	else if (encoder->base.crtc)
		active_links = to_intel_crtc(encoder->base.crtc)->active;

	if (active_links) {
		if (!icl_tc_phy_is_connected(dig_port))
			DRM_DEBUG_KMS("Port %s: PHY disconnected with %d active link(s)\n",
				      dig_port->tc_port_name, active_links);
		intel_tc_port_link_init_refcount(dig_port, active_links);

		goto out;
	}

	if (dig_port->tc_legacy_port)
		icl_tc_phy_connect(dig_port, 1);

out:
	DRM_DEBUG_KMS("Port %s: sanitize mode (%s)\n",
		      dig_port->tc_port_name,
		      tc_port_mode_name(dig_port->tc_mode));

	mutex_unlock(&dig_port->tc_lock);
}

static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
{
	return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode;
}

/*
 * The type-C ports are different because even when they are connected, they may
 * not be available/usable by the graphics driver: see the comment on
 * icl_tc_phy_connect(). So in our driver instead of adding the additional
 * concept of "usable" and make everything check for "connected and usable" we
 * define a port as "connected" when it is not only connected, but also when it
 * is usable by the rest of the driver. That maintains the old assumption that
 * connected ports are usable, and avoids exposing to the users objects they
 * can't really use.
 */
bool intel_tc_port_connected(struct intel_digital_port *dig_port)
{
	bool is_connected;

	intel_tc_port_lock(dig_port);
	is_connected = tc_port_live_status_mask(dig_port) &
		       BIT(dig_port->tc_mode);
	intel_tc_port_unlock(dig_port);

	return is_connected;
}

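/*
 * Lock the TC port while holding a display power reference. If no link is
 * active and the current mode no longer matches the live status, reset the
 * mode under the lock before returning.
 */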
static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
				 int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get(i915, POWER_DOMAIN_DISPLAY_CORE);

	mutex_lock(&dig_port->tc_lock);

	if (!dig_port->tc_link_refcount &&
	    intel_tc_port_needs_reset(dig_port))
		intel_tc_port_reset_mode(dig_port, required_lanes);

	WARN_ON(dig_port->tc_lock_wakeref);
	dig_port->tc_lock_wakeref = wakeref;
}

void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
	__intel_tc_port_lock(dig_port, 1);
}

void intel_tc_port_unlock(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref = fetch_and_zero(&dig_port->tc_lock_wakeref);

	mutex_unlock(&dig_port->tc_lock);

	intel_display_power_put_async(i915, POWER_DOMAIN_DISPLAY_CORE,
				      wakeref);
}

bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
{
	return mutex_is_locked(&dig_port->tc_lock) ||
	       dig_port->tc_link_refcount;
}

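/*
 * Pin the current TC mode for the lifetime of a link: taking a link reference
 * prevents mode resets in __intel_tc_port_lock() until the reference is
 * dropped again with intel_tc_port_put_link().
 */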
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
			    int required_lanes)
{
	__intel_tc_port_lock(dig_port, required_lanes);
	dig_port->tc_link_refcount++;
	intel_tc_port_unlock(dig_port);
}

void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
	mutex_lock(&dig_port->tc_lock);
	dig_port->tc_link_refcount--;
	mutex_unlock(&dig_port->tc_lock);
}

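/*
 * One-time initialization of the TC port state: build the port name, set up
 * the lock, the legacy flag and the link refcount, and cache the FIA
 * parameters for the port.
 */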
void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;
	enum tc_port tc_port = intel_port_to_tc(i915, port);

	if (WARN_ON(tc_port == PORT_TC_NONE))
		return;

	snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
		 "%c/TC#%d", port_name(port), tc_port + 1);

	mutex_init(&dig_port->tc_lock);
	dig_port->tc_legacy_port = is_legacy;
	dig_port->tc_link_refcount = 0;
	tc_port_load_fia_params(i915, dig_port);
}