/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <asm/byteorder.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_DPRX_ESI_LEN 14

/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER	61440

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
#define DP_DSC_FEC_OVERHEAD_FACTOR		976

/* Compliance test status bits  */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

65
struct dp_link_dpll {
66
	int clock;
67 68 69
	struct dpll dpll;
};

70
/* G4x DPLL settings for the two fixed DP link rates. */
static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* PCH DPLL settings for the two fixed DP link rates. */
static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

84
/* Valleyview DPLL settings for the two fixed DP link rates. */
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

91 92 93 94 95 96 97 98 99 100
/*
 * CHV supports eDP 1.4 that have  more link rates.
 * Below only provides the fixed rate but exclude variable rate.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};
106

107 108 109 110 111 112 113 114
/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With Single pipe configuration, HW is capable of supporting maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

115
/**
116
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
117 118 119 120 121
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
122
bool intel_dp_is_edp(struct intel_dp *intel_dp)
123
{
124 125 126
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
127 128
}

129 130
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
131
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
132 133
}

134 135
static void intel_dp_link_down(struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state);
136
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
137
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
138 139
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state);
140
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
141
				      enum pipe pipe);
142
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
143

144 145 146
/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
147
	static const int dp_rates[] = {
148
		162000, 270000, 540000, 810000
149
	};
150
	int i, max_rate;
151

152
	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
153

154 155
	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
156
			break;
157
		intel_dp->sink_rates[i] = dp_rates[i];
158
	}
159

160
	intel_dp->num_sink_rates = i;
161 162
}

163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184
/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int n;

	/* rates[] is sorted ascending, so scan down from the top. */
	for (n = len; n > 0; n--) {
		if (rates[n - 1] <= max_rate)
			return n;
	}

	return 0;
}

/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	const int *rates = intel_dp->common_rates;

	return intel_dp_rate_limit_len(rates, intel_dp->num_common_rates,
				       max_rate);
}

185 186
/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
187
{
188
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
189 190
}

191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220
/*
 * Number of DP lanes the Type-C FIA has assigned to this port.
 * Ports that are not Type-C (or not in TYPEC mode) always have all
 * 4 lanes available.
 */
static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
	u32 lane_info;

	if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC)
		return 4;

	/* 4-bit per-port lane assignment bitmask read from the FIA. */
	lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
		     DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
		    DP_LANE_ASSIGNMENT_SHIFT(tc_port);

	/* The usable lane count is the popcount of the assignment mask. */
	switch (lane_info) {
	default:
		MISSING_CASE(lane_info);
		/* fall through */
	case 1:
	case 2:
	case 4:
	case 8:
		return 1;
	case 3:
	case 12:
		return 2;
	case 15:
		return 4;
	}
}

221 222
/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
223 224
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
225 226
	int source_max = intel_dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
227
	int fia_max = intel_dp_get_fia_supported_lane_count(intel_dp);
228

229
	return min3(source_max, sink_max, fia_max);
230 231
}

232
int intel_dp_max_lane_count(struct intel_dp *intel_dp)
233 234 235 236
{
	return intel_dp->max_link_lane_count;
}

237
/*
 * Bandwidth a mode needs on the link: pixel_clock is in kHz, so
 * multiplying by bpp and rounding up over 8 gives kBytes/s.
 */
int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* round up on the bit-to-byte conversion */
	return (pixel_clock * bpp + 7) / 8;
}

244
int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/*
	 * max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since, 8 bits of data
	 * is transmitted every LS_Clk per lane, there is no need to account for
	 * the channel encoding that is done in the PHY layer here.
	 */
	return max_link_clock * max_lanes;
}

256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278
/* Dotclock limit imposed by a DP->VGA downstream port, if any. */
static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	int max_dotclk = dev_priv->max_dotclk_freq;
	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
	int ds_max_dotclk;

	/* Only VGA converters constrain the dotclock here. */
	if (type != DP_DS_PORT_TYPE_VGA)
		return max_dotclk;

	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
						    intel_dp->downstream_ports);

	/* Zero means the branch device does not report a limit. */
	if (ds_max_dotclk != 0)
		max_dotclk = min(max_dotclk, ds_max_dotclk);

	return max_dotclk;
}

279
static int cnl_max_source_rate(struct intel_dp *intel_dp)
280 281 282 283 284 285 286 287 288
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
289
		return 540000;
290 291 292

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
293
		return 810000;
294

295
	/* For other SKUs, max rate on ports A and D is 5.4G */
296
	if (port == PORT_A || port == PORT_D)
297
		return 540000;
298

299
	return 810000;
300 301
}

302 303 304 305 306 307 308 309 310 311 312
/* Maximum source link rate for an ICL port. */
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	/* Port B tops out at HBR2 (5.4G); every other port reaches HBR3. */
	if (dig_port->base.port == PORT_B)
		return 540000;

	return 810000;
}

313 314
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
315
{
316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
332 333
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
334 335
	const struct ddi_vbt_port_info *info =
		&dev_priv->vbt.ddi_port_info[dig_port->base.port];
336
	const int *source_rates;
337
	int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;
338

339 340 341
	/* This should only be done once */
	WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);

342
	if (INTEL_GEN(dev_priv) >= 10) {
343
		source_rates = cnl_rates;
344
		size = ARRAY_SIZE(cnl_rates);
345
		if (IS_GEN10(dev_priv))
346 347 348
			max_rate = cnl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
349 350 351
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
352
	} else if (IS_GEN9_BC(dev_priv)) {
353
		source_rates = skl_rates;
354
		size = ARRAY_SIZE(skl_rates);
355 356
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
357 358
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
359
	} else {
360 361
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
362 363
	}

364 365 366 367 368
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

369 370 371
	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

372 373
	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398
}

/* Sorted-list intersection of source and sink rates into common_rates. */
static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		int s = source_rates[i], t = sink_rates[j];

		if (s < t) {
			i++;
		} else if (t < s) {
			j++;
		} else {
			/* Never overflow the fixed-size common_rates array. */
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				break;
			common_rates[k++] = s;
			i++;
			j++;
		}
	}
	return k;
}

399 400 401 402 403 404 405 406 407 408 409 410
/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int idx = 0;

	while (idx < len) {
		if (rates[idx] == rate)
			return idx;
		idx++;
	}

	return -1;
}

411
static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
412
{
413
	WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);
414

415 416 417 418 419 420 421 422
	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (WARN_ON(intel_dp->num_common_rates == 0)) {
423
		intel_dp->common_rates[0] = 162000;
424 425 426 427
		intel_dp->num_common_rates = 1;
	}
}

428 429
/* Are the given link parameters usable with the current fallback limits? */
static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       uint8_t lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	return link_rate != 0 &&
	       link_rate <= intel_dp->max_link_rate &&
	       lane_count != 0 &&
	       lane_count <= intel_dp_max_lane_count(intel_dp);
}

447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462
/* Can the eDP panel's fixed mode still fit on the reduced link? */
static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
						     int link_rate,
						     uint8_t lane_count)
{
	const struct drm_display_mode *fixed_mode =
		intel_dp->attached_connector->panel.fixed_mode;
	/* The fixed mode must be drivable at a minimum of 18bpp. */
	int mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
	int max_rate = intel_dp_max_data_rate(link_rate, lane_count);

	return mode_rate <= max_rate;
}

463 464 465
/*
 * Compute the next (rate, lane count) to retry link training with:
 * step down one common rate first, then halve the lane count at max rate.
 * Returns 0 when a retry is possible, -1 when all fallbacks are exhausted.
 */
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, uint8_t lane_count)
{
	int new_rate, new_lane_count;
	int index;

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		/* Drop to the next lower common rate, same lane count. */
		new_rate = intel_dp->common_rates[index - 1];
		new_lane_count = lane_count;
	} else if (lane_count > 1) {
		/* Lowest rate reached: halve the lanes, restart at max rate. */
		new_rate = intel_dp_max_common_rate(intel_dp);
		new_lane_count = lane_count >> 1;
	} else {
		DRM_ERROR("Link Training Unsuccessful\n");
		return -1;
	}

	/* eDP must still be able to drive the panel's fixed mode. */
	if (intel_dp_is_edp(intel_dp) &&
	    !intel_dp_can_link_train_fallback_for_edp(intel_dp, new_rate,
						      new_lane_count)) {
		DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
		return 0;
	}

	intel_dp->max_link_rate = new_rate;
	intel_dp->max_link_lane_count = new_lane_count;

	return 0;
}

499
static enum drm_mode_status
500 501 502
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
503
	struct intel_dp *intel_dp = intel_attached_dp(connector);
504 505
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
506
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
507 508
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
509
	int max_dotclk;
510 511
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;
512

513 514 515
	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

516
	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
517

518
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
519
		if (mode->hdisplay > fixed_mode->hdisplay)
520 521
			return MODE_PANEL;

522
		if (mode->vdisplay > fixed_mode->vdisplay)
523
			return MODE_PANEL;
524 525

		target_clock = fixed_mode->clock;
526 527
	}

528
	max_link_clock = intel_dp_max_link_rate(intel_dp);
529
	max_lanes = intel_dp_max_lane_count(intel_dp);
530 531 532 533

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560
	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay);
		}
	}

	if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
	    target_clock > max_dotclk)
561
		return MODE_CLOCK_HIGH;
562 563 564 565

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

566 567 568
	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

569 570 571
	return MODE_OK;
}

572
/* Pack up to 4 bytes into a big-endian u32 for the AUX data registers. */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i, n = src_bytes > 4 ? 4 : src_bytes;

	for (i = 0; i < n; i++)
		v |= (uint32_t)src[i] << (24 - i * 8);

	return v;
}

584
/* Unpack a big-endian AUX data register value into up to 4 bytes. */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i, n = dst_bytes > 4 ? 4 : dst_bytes;

	for (i = 0; i < n; i++)
		dst[i] = (uint8_t)(src >> (24 - i * 8));
}

593
static void
594
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
595
static void
596
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
597
					      bool force_disable_vdd);
598
static void
599
intel_dp_pps_init(struct intel_dp *intel_dp);
600

601 602
static void pps_lock(struct intel_dp *intel_dp)
{
603
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
604 605

	/*
606
	 * See intel_power_sequencer_reset() why we need
607 608
	 * a power domain reference here.
	 */
609 610
	intel_display_power_get(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)));
611 612 613 614 615 616

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
617
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
618 619 620

	mutex_unlock(&dev_priv->pps_mutex);

621 622
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)));
623 624
}

625 626 627
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
628
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
629 630
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
631 632 633
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
634 635 636
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
637
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
638
		 pipe_name(pipe), port_name(intel_dig_port->base.port)))
639 640 641
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
642
		      pipe_name(pipe), port_name(intel_dig_port->base.port));
643 644 645 646 647 648 649 650 651

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

652
	if (IS_CHERRYVIEW(dev_priv))
653 654 655
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);
656

657 658 659 660 661 662
	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable temporarily it if it's not already enabled.
	 */
663
	if (!pll_enabled) {
664
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
665 666
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

667
		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
668 669 670 671 672
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			DRM_ERROR("Failed to force on pll for pipe %c!\n",
				  pipe_name(pipe));
			return;
		}
673
	}
674

675 676 677
	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
678
	 * to make this power sequencer lock onto the port.
679 680 681 682 683 684 685 686 687 688
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
689

690
	if (!pll_enabled) {
691
		vlv_force_pll_off(dev_priv, pipe);
692 693 694 695

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
696 697
}

698 699 700 701 702 703 704 705 706
static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
707 708
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729

		if (encoder->type == INTEL_OUTPUT_EDP) {
			WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
				intel_dp->active_pipe != intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}

730 731 732
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
733
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
734
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
735
	enum pipe pipe;
736

V
Ville Syrjälä 已提交
737
	lockdep_assert_held(&dev_priv->pps_mutex);
738

739
	/* We should never land here with regular DP ports */
740
	WARN_ON(!intel_dp_is_edp(intel_dp));
741

742 743 744
	WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
		intel_dp->active_pipe != intel_dp->pps_pipe);

745 746 747
	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

748
	pipe = vlv_find_free_pps(dev_priv);
749 750 751 752 753

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
754
	if (WARN_ON(pipe == INVALID_PIPE))
755
		pipe = PIPE_A;
756

757
	vlv_steal_power_sequencer(dev_priv, pipe);
758
	intel_dp->pps_pipe = pipe;
759 760 761

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
762
		      port_name(intel_dig_port->base.port));
763 764

	/* init power sequencer on this pipe and port */
765 766
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
767

768 769 770 771 772
	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);
773 774 775 776

	return intel_dp->pps_pipe;
}

777 778 779
static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
780
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
781
	int backlight_controller = dev_priv->vbt.backlight.controller;
782 783 784 785

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
786
	WARN_ON(!intel_dp_is_edp(intel_dp));
787 788

	if (!intel_dp->pps_reset)
789
		return backlight_controller;
790 791 792 793 794 795 796

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been setup during connector init.
	 */
797
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
798

799
	return backlight_controller;
800 801
}

802 803 804 805 806 807
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
808
	return I915_READ(PP_STATUS(pipe)) & PP_ON;
809 810 811 812 813
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
814
	return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
815 816 817 818 819 820 821
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
822

823
static enum pipe
824 825 826
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
827 828
{
	enum pipe pipe;
829 830

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
831
		u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
832
			PANEL_PORT_SELECT_MASK;
833 834 835 836

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

837 838 839
		if (!pipe_check(dev_priv, pipe))
			continue;

840
		return pipe;
841 842
	}

843 844 845 846 847 848
	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
849
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
850
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
851
	enum port port = intel_dig_port->base.port;
852 853 854 855

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
856 857 858 859 860 861 862 863 864 865 866
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);
867 868 869 870 871 872

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
873 874
	}

875 876 877
	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

878 879
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
880 881
}

882
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
883 884 885
{
	struct intel_encoder *encoder;

886
	if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
887
		    !IS_GEN9_LP(dev_priv)))
888 889 890 891 892 893 894 895 896 897 898 899
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

900 901
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
902

903 904 905 906 907
		WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

908
		if (IS_GEN9_LP(dev_priv))
909 910 911
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
912
	}
913 914
}

915 916 917 918 919 920 921 922
/* Panel power sequencer register set, resolved per platform/PPS index. */
struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;	/* left zeroed on GEN9_LP/CNP/ICP (no divisor reg) */
};

923
static void intel_pps_get_registers(struct intel_dp *intel_dp,
924 925
				    struct pps_registers *regs)
{
926
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
927 928
	int pps_idx = 0;

929 930
	memset(regs, 0, sizeof(*regs));

931
	if (IS_GEN9_LP(dev_priv))
932 933 934
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);
935

936 937 938 939
	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);
940 941
	if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) &&
	    !HAS_PCH_ICP(dev_priv))
942
		regs->pp_div = PP_DIVISOR(pps_idx);
943 944
}

945 946
static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
947
{
948
	struct pps_registers regs;
949

950
	intel_pps_get_registers(intel_dp, &regs);
951 952

	return regs.pp_ctrl;
953 954
}

955 956
static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
957
{
958
	struct pps_registers regs;
959

960
	intel_pps_get_registers(intel_dp, &regs);
961 962

	return regs.pp_stat;
963 964
}

965 966 967 968 969 970 971
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
   This function only applicable when panel PM state is not to be tracked */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
972
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
973

974
	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
975 976
		return 0;

977
	pps_lock(intel_dp);
V
Ville Syrjälä 已提交
978

979
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
V
Ville Syrjälä 已提交
980
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
981
		i915_reg_t pp_ctrl_reg, pp_div_reg;
982
		u32 pp_div;
V
Ville Syrjälä 已提交
983

984 985
		pp_ctrl_reg = PP_CONTROL(pipe);
		pp_div_reg  = PP_DIVISOR(pipe);
986 987 988 989 990 991 992 993 994
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

995
	pps_unlock(intel_dp);
V
Ville Syrjälä 已提交
996

997 998 999
	return 0;
}

1000
/*
 * Report whether the panel power sequencer says the panel is powered on.
 * Caller must hold pps_mutex.  On VLV/CHV a pipe must already be assigned
 * to the power sequencer; without one we cannot know which PP_STATUS
 * register to read, so report "off".
 */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

1013
/*
 * Report whether VDD force is currently enabled for the panel.
 * Caller must hold pps_mutex; on VLV/CHV a power-sequencer pipe must be
 * assigned, otherwise we have no register to consult and report "off".
 */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

1026 1027 1028
/*
 * Sanity check before AUX traffic on eDP: the AUX channel only works when
 * either panel power or forced VDD is up.  Diagnostic only — warns and
 * dumps the PPS status/control registers, does not fix anything.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

1042
/*
 * Wait (up to 10ms) for the AUX channel to clear its SEND_BUSY bit and
 * return the last control-register value read.  Note the C macro both
 * tests the condition and updates 'status' as a side effect, so 'status'
 * always holds the final register snapshot.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(10));
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout!\n");
#undef C

	return status;
}

1060
static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1061
{
1062
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1063

1064 1065 1066
	if (index)
		return 0;

1067 1068
	/*
	 * The clock divider is based off the hrawclk, and would like to run at
1069
	 * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
1070
	 */
1071
	return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
1072 1073 1074 1075
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
1076
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1077
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1078 1079 1080 1081

	if (index)
		return 0;

1082 1083 1084 1085 1086
	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that
	 */
1087
	if (dig_port->aux_ch == AUX_CH_A)
1088
		return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
1089 1090
	else
		return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
1091 1092 1093 1094
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
1095
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1096
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1097

1098
	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
1099
		/* Workaround for non-ULT HSW */
1100 1101 1102 1103 1104
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
1105
	}
1106 1107

	return ilk_get_aux_clock_divider(intel_dp, index);
1108 1109
}

1110 1111 1112 1113 1114 1115 1116 1117 1118 1119
/*
 * SKL doesn't need us to program the AUX clock divider (the hardware
 * derives the clock from CDCLK automatically).  We still provide a
 * get_aux_clock_divider vfunc so the shared AUX code works unmodified:
 * a dummy non-zero divider for the first attempt, 0 to stop the loop.
 */
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 1;
}

1120 1121 1122
/*
 * Build the AUX_CH_CTL value for a send on pre-SKL hardware: busy/done/
 * interrupt/error bits, the platform-appropriate timeout and precharge
 * times, the message size and the clock divider computed by the caller.
 */
static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				     int send_bytes,
				     uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	uint32_t precharge, timeout;

	/* SNB wants a shorter precharge than later platforms. */
	precharge = IS_GEN6(dev_priv) ? 3 : 5;

	/* BDW supports (and needs) the longer 600us timeout. */
	timeout = IS_BROADWELL(dev_priv) ? DP_AUX_CH_CTL_TIME_OUT_600us :
					   DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

1150 1151 1152 1153
/*
 * Build the AUX_CH_CTL value for a send on SKL+.  No clock divider is
 * programmed (hardware derives the AUX clock from cdclk), hence the
 * 'unused' parameter required by the vfunc signature.
 */
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				      int send_bytes,
				      uint32_t unused)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	uint32_t ctl;

	ctl = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

	/* Type-C ports routed over Thunderbolt need the TBT IO select bit. */
	if (intel_dig_port->tc_type == TC_PORT_TBT)
		ctl |= DP_AUX_CH_CTL_TBT_IO;

	return ctl;
}

1173
/*
 * Perform one raw AUX transaction: write up to 20 bytes from @send, read
 * up to @recv_size bytes into @recv.  Handles VDD, PM QoS, divider/retry
 * loops and error classification.  Returns the number of bytes received,
 * or a negative errno (-EBUSY to make the drm core retry, -EIO on receive
 * error, -ETIMEDOUT when the sink doesn't answer).
 */
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const uint8_t *send, int send_bytes,
		  uint8_t *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	i915_reg_t ch_ctl, ch_data[5];
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		/* Rate-limit the WARN: only shout when the stuck status changes. */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Outer loop walks the platform's candidate clock dividers (0 ends it). */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	/* Undo everything acquired above, in reverse order. */
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

1342 1343
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

/*
 * Assemble the 4-byte AUX request header from a drm_dp_aux_msg:
 * byte 0 = command nibble + address[19:16], bytes 1-2 = address[15:0],
 * byte 3 = payload length minus one (per the DP AUX protocol).
 */
static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}

1355 1356
/*
 * drm_dp_aux.transfer() hook: translate a drm_dp_aux_msg into a raw AUX
 * transaction via intel_dp_aux_xfer().
 *
 * Returns the payload size on success or a negative errno.  The first
 * byte received from the sink carries the AUX reply code in its high
 * nibble, which is stored in msg->reply for the caller to inspect.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* size == 0 is an address-only message: send only 3 header bytes. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		/* A buffer without a size (or vice versa) is a caller bug. */
		WARN_ON(!msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		/* Reply byte plus the requested payload. */
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

1426

1427
static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
1428
{
1429
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1430 1431
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;
1432

1433 1434 1435 1436 1437
	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
1438
	default:
1439 1440
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
1441 1442 1443
	}
}

1444
static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
1445
{
1446
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1447 1448
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;
1449

1450 1451 1452 1453 1454
	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
1455
	default:
1456 1457
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
1458 1459 1460
	}
}

1461
static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
1462
{
1463
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1464 1465
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;
1466

1467 1468 1469 1470 1471 1472 1473
	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
1474
	default:
1475 1476
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
1477 1478 1479
	}
}

1480
static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
1481
{
1482
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1483 1484
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;
1485

1486 1487 1488 1489 1490 1491 1492
	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
1493
	default:
1494 1495
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
1496 1497 1498
	}
}

1499
static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
1500
{
1501
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1502 1503
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;
1504

1505 1506 1507 1508 1509
	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
1510
	case AUX_CH_E:
1511 1512
	case AUX_CH_F:
		return DP_AUX_CH_CTL(aux_ch);
1513
	default:
1514 1515
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
1516 1517 1518
	}
}

1519
static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
1520
{
1521
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1522 1523
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;
1524

1525 1526 1527 1528 1529
	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
1530
	case AUX_CH_E:
1531 1532
	case AUX_CH_F:
		return DP_AUX_CH_DATA(aux_ch, index);
1533
	default:
1534 1535
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
1536 1537 1538
	}
}

1539 1540 1541 1542 1543 1544 1545 1546
/* Free the AUX channel name allocated by intel_dp_aux_init(). */
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(intel_dp->aux.name);
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp)
1547
{
1548
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1549 1550
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
1551

1552 1553 1554 1555 1556 1557 1558 1559 1560 1561
	if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}
1562

1563 1564 1565 1566 1567 1568 1569 1570
	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
1571

1572 1573 1574 1575
	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
1576

1577
	drm_dp_aux_init(&intel_dp->aux);
1578

1579
	/* Failure to allocate our preferred name is not critical */
1580 1581
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
				       port_name(encoder->port));
1582
	intel_dp->aux.transfer = intel_dp_aux_transfer;
1583 1584
}

1585
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1586
{
1587
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1588

1589
	return max_rate >= 540000;
1590 1591
}

1592 1593 1594 1595 1596 1597 1598
/* HBR3 requires a source that can drive at least 8.1 GHz (810000 kHz). */
bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
{
	return intel_dp->source_rates[intel_dp->num_source_rates - 1] >= 810000;
}

1599 1600
static void
intel_dp_set_clock(struct intel_encoder *encoder,
1601
		   struct intel_crtc_state *pipe_config)
1602
{
1603
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1604 1605
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;
1606

1607
	if (IS_G4X(dev_priv)) {
1608 1609
		divisor = g4x_dpll;
		count = ARRAY_SIZE(g4x_dpll);
1610
	} else if (HAS_PCH_SPLIT(dev_priv)) {
1611 1612
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
1613
	} else if (IS_CHERRYVIEW(dev_priv)) {
1614 1615
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
1616
	} else if (IS_VALLEYVIEW(dev_priv)) {
1617 1618
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
1619
	}
1620 1621 1622

	if (divisor && count) {
		for (i = 0; i < count; i++) {
1623
			if (pipe_config->port_clock == divisor[i].clock) {
1624 1625 1626 1627 1628
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
1629 1630 1631
	}
}

1632 1633 1634 1635 1636 1637 1638 1639
/*
 * Format @nelem integers from @array into @str as a comma-separated list,
 * writing at most @len bytes (always NUL-terminated).  Stops silently on
 * truncation or output error.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		/*
		 * Make the stop condition explicit and well-typed: bail on
		 * snprintf error (r < 0) or truncation (r >= len).  The old
		 * "r >= len" relied on the signed int being promoted to
		 * size_t, which only handled a negative r by accident.
		 */
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}

/*
 * Dump the source, sink and common link rate tables to the KMS debug log.
 * Cheap no-op unless KMS debugging is enabled.
 */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	char str[128]; /* FIXME: too big for stack? */

	/* Skip all the formatting work when nobody will see the output. */
	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}

1668 1669 1670 1671 1672
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int len;

1673
	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
1674 1675 1676
	if (WARN_ON(len <= 0))
		return 162000;

1677
	return intel_dp->common_rates[len - 1];
1678 1679
}

1680 1681
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
1682 1683
	int i = intel_dp_rate_index(intel_dp->sink_rates,
				    intel_dp->num_sink_rates, rate);
1684 1685 1686 1687 1688

	if (WARN_ON(i < 0))
		i = 0;

	return i;
1689 1690
}

1691 1692
/*
 * Translate a port clock into the values to write to the sink: either a
 * classic link-bw code, or (for eDP 1.4 rate select) a rate table index.
 * Exactly one of *link_bw / *rate_select is non-zero.
 */
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   uint8_t *link_bw, uint8_t *rate_select)
{
	if (!intel_dp->use_rate_select) {
		/* Classic DP link-bw code. */
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	} else {
		/* eDP 1.4 rate select method. */
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	}
}

1705 1706 1707 1708 1709 1710
/*
 * Search space for the link configuration computation: clock limits are
 * indices into intel_dp->common_rates, bpp limits are total link bpp
 * (3 * bpc).
 */
struct link_config_limits {
	int min_clock, max_clock;
	int min_lane_count, max_lane_count;
	int min_bpp, max_bpp;
};

1711 1712
/*
 * Compute the maximum link bpp for this pipe, clamping the pipe bpp by
 * the downstream facing port's max bpc and, for eDP panels without EDID
 * bpc info, by the VBT-provided limit.
 */
static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpp = pipe_config->pipe_bpp;
	bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);

	/* bpc == 0 means the downstream port reports no limit. */
	if (bpc > 0)
		bpp = min(bpp, 3*bpc);

	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that dont have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}
	}

	return bpp;
}

1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772
/* Adjust link config limits based on compliance test requests. */
static void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		/* Pin bpp to exactly the requested value. */
		limits->min_bpp = limits->max_bpp = bpp;
		pipe_config->dither_force_disable = bpp == 6 * 3;

		DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			if (index >= 0)
				limits->min_clock = limits->max_clock = index;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}

1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808
/* Optimize link config in order: max bpp, min clock, min lanes */
static bool
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int bpp, i, lanes;

	/* Highest bpp first, then slowest rate, then fewest lanes. */
	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		int mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						       bpp);

		for (i = limits->min_clock; i <= limits->max_clock; i++) {
			int link_clock = intel_dp->common_rates[i];

			for (lanes = limits->min_lane_count;
			     lanes <= limits->max_lane_count;
			     lanes <<= 1) {
				if (mode_rate <=
				    intel_dp_max_data_rate(link_clock, lanes)) {
					pipe_config->lane_count = lanes;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return true;
				}
			}
		}
	}

	return false;
}

1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844
/* Optimize link config in order: max bpp, min lanes, min clock */
static bool
intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int bpp, i, lanes;

	/* Highest bpp first, then fewest lanes, then slowest rate. */
	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		int mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						       bpp);

		for (lanes = limits->min_lane_count;
		     lanes <= limits->max_lane_count;
		     lanes <<= 1) {
			for (i = limits->min_clock; i <= limits->max_clock; i++) {
				int link_clock = intel_dp->common_rates[i];

				if (mode_rate <=
				    intel_dp_max_data_rate(link_clock, lanes)) {
					pipe_config->lane_count = lanes;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return true;
				}
			}
		}
	}

	return false;
}

1845 1846 1847
/*
 * Choose lane count, link rate and pipe bpp for the mode being set.
 * Builds the search limits (optionally pinned by compliance testing or
 * pre-1.4 eDP panels), then searches wide-first for DP or narrow-first
 * for eDP.  Returns false when no config can carry the mode.
 */
static bool
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct link_config_limits limits;
	int common_len;

	common_len = intel_dp_common_len_rate_limit(intel_dp,
						    intel_dp->max_link_rate);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	limits.min_clock = 0;
	limits.max_clock = common_len - 1;

	limits.min_lane_count = 1;
	limits.max_lane_count = intel_dp_max_lane_count(intel_dp);

	limits.min_bpp = 6 * 3;
	limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);

	if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of. The eDP 1.3 and earlier panels
		 * are generally designed to support only a single clock and
		 * lane configuration, and typically these values correspond to
		 * the native resolution of the panel. With eDP 1.4 rate select
		 * and DSC, this is decreasingly the case, and we need to be
		 * able to select less than maximum link config.
		 */
		limits.min_lane_count = limits.max_lane_count;
		limits.min_clock = limits.max_clock;
	}

	intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max rate %d max bpp %d pixel clock %iKHz\n",
		      limits.max_lane_count,
		      intel_dp->common_rates[limits.max_clock],
		      limits.max_bpp, adjusted_mode->crtc_clock);

	if (intel_dp_is_edp(intel_dp)) {
		/*
		 * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
		 * section A.1: "It is recommended that the minimum number of
		 * lanes be used, using the minimum link rate allowed for that
		 * lane configuration."
		 *
		 * Note that we use the max clock and lane count for eDP 1.3 and
		 * earlier, and fast vs. wide is irrelevant.
		 */
		if (!intel_dp_compute_link_config_fast(intel_dp, pipe_config,
						       &limits))
			return false;
	} else {
		/* Optimize for slow and wide. */
		if (!intel_dp_compute_link_config_wide(intel_dp, pipe_config,
						       &limits))
			return false;
	}

	DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
		      pipe_config->lane_count, pipe_config->port_clock,
		      pipe_config->pipe_bpp);

	DRM_DEBUG_KMS("DP link rate required %i available %i\n",
		      intel_dp_link_required(adjusted_mode->crtc_clock,
					     pipe_config->pipe_bpp),
		      intel_dp_max_data_rate(pipe_config->port_clock,
					     pipe_config->lane_count));

	return true;
}

bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1932
	struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
1933 1934 1935 1936 1937
	enum port port = encoder->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
1938 1939
	bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
					   DP_DPCD_QUIRK_CONSTANT_N);
1940 1941 1942 1943

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

1944
	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
1945 1946 1947
	if (lspcon->active)
		lspcon_ycbcr420_config(&intel_connector->base, pipe_config);

1948 1949 1950 1951 1952 1953 1954 1955 1956
	pipe_config->has_drrs = false;
	if (IS_G4X(dev_priv) || port == PORT_A)
		pipe_config->has_audio = false;
	else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		pipe_config->has_audio = intel_dp->has_audio;
	else
		pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;

	if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1957 1958
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975

		if (INTEL_GEN(dev_priv) >= 9) {
			int ret;

			ret = skl_update_scaler_crtc(pipe_config);
			if (ret)
				return ret;
		}

		if (HAS_GMCH_DISPLAY(dev_priv))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 conn_state->scaling_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						conn_state->scaling_mode);
	}

1976 1977 1978
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return false;

1979
	if (HAS_GMCH_DISPLAY(dev_priv) &&
1980 1981 1982 1983 1984 1985 1986 1987 1988
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return false;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	if (!intel_dp_compute_link_config(encoder, pipe_config))
		return false;

1989
	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
1990 1991 1992 1993 1994
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
1995
		pipe_config->limited_color_range =
1996
			pipe_config->pipe_bpp != 18 &&
1997 1998
			drm_default_rgb_quant_range(adjusted_mode) ==
			HDMI_QUANTIZATION_RANGE_LIMITED;
1999 2000
	} else {
		pipe_config->limited_color_range =
2001
			intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
2002 2003
	}

2004
	intel_link_compute_m_n(pipe_config->pipe_bpp, pipe_config->lane_count,
2005 2006
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
2007
			       &pipe_config->dp_m_n,
2008
			       constant_n);
2009

2010
	if (intel_connector->panel.downclock_mode != NULL &&
2011
		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
2012
			pipe_config->has_drrs = true;
2013 2014 2015 2016 2017
			intel_link_compute_m_n(pipe_config->pipe_bpp,
					       pipe_config->lane_count,
					       intel_connector->panel.downclock_mode->clock,
					       pipe_config->port_clock,
					       &pipe_config->dp_m2_n2,
2018
					       constant_n);
2019 2020
	}

2021
	if (!HAS_DDI(dev_priv))
2022
		intel_dp_set_clock(encoder, pipe_config);
2023

2024 2025
	intel_psr_compute_config(intel_dp, pipe_config);

2026
	return true;
2027 2028
}

2029
/*
 * Record the negotiated link parameters on the intel_dp, and mark the
 * link as untrained so a subsequent modeset/retrain reprograms it.
 */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      int link_rate, uint8_t lane_count,
			      bool link_mst)
{
	intel_dp->link_trained = false;
	intel_dp->link_rate = link_rate;
	intel_dp->lane_count = lane_count;
	intel_dp->link_mst = link_mst;
}

2039
static void intel_dp_prepare(struct intel_encoder *encoder,
2040
			     const struct intel_crtc_state *pipe_config)
2041
{
2042
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2043
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2044
	enum port port = encoder->port;
2045
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
2046
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
2047

2048 2049 2050 2051
	intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
				 pipe_config->lane_count,
				 intel_crtc_has_type(pipe_config,
						     INTEL_OUTPUT_DP_MST));
2052

2053
	/*
K
Keith Packard 已提交
2054
	 * There are four kinds of DP registers:
2055 2056
	 *
	 * 	IBX PCH
K
Keith Packard 已提交
2057 2058
	 * 	SNB CPU
	 *	IVB CPU
2059 2060 2061 2062 2063 2064 2065 2066 2067 2068
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */
2069

2070 2071 2072 2073
	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
2074

2075 2076
	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
2077
	intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);
2078

2079
	/* Split out the IBX/CPU vs CPT settings */
2080

2081
	if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
K
Keith Packard 已提交
2082 2083 2084 2085 2086 2087
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

2088
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
K
Keith Packard 已提交
2089 2090
			intel_dp->DP |= DP_ENHANCED_FRAMING;

2091
		intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
2092
	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
2093 2094
		u32 trans_dp;

2095
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2096 2097 2098 2099 2100 2101 2102

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
2103
	} else {
2104
		if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
2105
			intel_dp->DP |= DP_COLOR_RANGE_16_235;
2106 2107 2108 2109 2110 2111 2112

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

2113
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2114 2115
			intel_dp->DP |= DP_ENHANCED_FRAMING;

2116
		if (IS_CHERRYVIEW(dev_priv))
2117 2118 2119
			intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
		else
			intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
2120
	}
2121 2122
}

2123 2124
/*
 * Mask/value pairs for polling the panel power sequencer status register
 * via wait_panel_status(): panel fully on, fully off, and idle after a
 * complete power cycle.
 */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

static void intel_pps_verify_state(struct intel_dp *intel_dp);
I
Imre Deak 已提交
2133

2134
/*
 * Poll the panel power status register until (status & mask) == value,
 * logging an error on timeout (5s). Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_verify_state(intel_dp);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	if (intel_wait_for_register(dev_priv,
				    pp_stat_reg, mask, value,
				    5000))
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));

	DRM_DEBUG_KMS("Wait complete\n");
}
2162

2163
static void wait_panel_on(struct intel_dp *intel_dp)
2164 2165
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
2166
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
2167 2168
}

2169
static void wait_panel_off(struct intel_dp *intel_dp)
2170 2171
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
2172
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
2173 2174
}

2175
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
2176
{
2177 2178 2179
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration;

2180
	DRM_DEBUG_KMS("Wait for panel power cycle\n");
2181

2182 2183 2184 2185 2186
	/* take the difference of currrent time and panel power off time
	 * and then make panel wait for t11_t12 if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

2187 2188
	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
2189 2190 2191
	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
				       intel_dp->panel_power_cycle_delay - panel_power_off_duration);
2192

2193
	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
2194 2195
}

2196
static void wait_backlight_on(struct intel_dp *intel_dp)
2197 2198 2199 2200 2201
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}

2202
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
2203 2204 2205 2206
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
2207

2208 2209 2210 2211
/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

2212
static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
2213
{
2214
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2215
	u32 control;
2216

V
Ville Syrjälä 已提交
2217 2218
	lockdep_assert_held(&dev_priv->pps_mutex);

2219
	control = I915_READ(_pp_ctrl_reg(intel_dp));
2220 2221
	if (WARN_ON(!HAS_DDI(dev_priv) &&
		    (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
2222 2223 2224
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
2225
	return control;
2226 2227
}

2228 2229 2230 2231 2232
/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
2233
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
2234
{
2235
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2236
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2237
	u32 pp;
2238
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
2239
	bool need_to_disable = !intel_dp->want_panel_vdd;
2240

V
Ville Syrjälä 已提交
2241 2242
	lockdep_assert_held(&dev_priv->pps_mutex);

2243
	if (!intel_dp_is_edp(intel_dp))
2244
		return false;
2245

2246
	cancel_delayed_work(&intel_dp->panel_vdd_work);
2247
	intel_dp->want_panel_vdd = true;
2248

2249
	if (edp_have_panel_vdd(intel_dp))
2250
		return need_to_disable;
2251

2252 2253
	intel_display_power_get(dev_priv,
				intel_aux_power_domain(intel_dig_port));
2254

V
Ville Syrjälä 已提交
2255
	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
2256
		      port_name(intel_dig_port->base.port));
2257

2258 2259
	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);
2260

2261
	pp = ironlake_get_pp_control(intel_dp);
2262
	pp |= EDP_FORCE_VDD;
2263

2264 2265
	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2266 2267 2268 2269 2270

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2271 2272 2273
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
2274
	if (!edp_have_panel_power(intel_dp)) {
V
Ville Syrjälä 已提交
2275
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
2276
			      port_name(intel_dig_port->base.port));
2277 2278
		msleep(intel_dp->panel_power_up_delay);
	}
2279 2280 2281 2282

	return need_to_disable;
}

2283 2284 2285 2286 2287 2288 2289
/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
2290
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
2291
{
2292
	bool vdd;
2293

2294
	if (!intel_dp_is_edp(intel_dp))
2295 2296
		return;

2297
	pps_lock(intel_dp);
2298
	vdd = edp_panel_vdd_on(intel_dp);
2299
	pps_unlock(intel_dp);
2300

R
Rob Clark 已提交
2301
	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
2302
	     port_name(dp_to_dig_port(intel_dp)->base.port));
2303 2304
}

2305
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
2306
{
2307
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2308 2309
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
2310
	u32 pp;
2311
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
2312

V
Ville Syrjälä 已提交
2313
	lockdep_assert_held(&dev_priv->pps_mutex);
2314

2315
	WARN_ON(intel_dp->want_panel_vdd);
2316

2317
	if (!edp_have_panel_vdd(intel_dp))
2318
		return;
2319

V
Ville Syrjälä 已提交
2320
	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
2321
		      port_name(intel_dig_port->base.port));
2322

2323 2324
	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;
2325

2326 2327
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);
2328

2329 2330
	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
P
Paulo Zanoni 已提交
2331

2332 2333 2334
	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2335

2336
	if ((pp & PANEL_POWER_ON) == 0)
2337
		intel_dp->panel_power_off_time = ktime_get_boottime();
2338

2339 2340
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(intel_dig_port));
2341
}
2342

2343
static void edp_panel_vdd_work(struct work_struct *__work)
2344 2345 2346 2347
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);

2348
	pps_lock(intel_dp);
2349 2350
	if (!intel_dp->want_panel_vdd)
		edp_panel_vdd_off_sync(intel_dp);
2351
	pps_unlock(intel_dp);
2352 2353
}

2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	/*
	 * Arm the deferred VDD-off work to fire a long time from now
	 * (five power cycle delays) so the panel power stays up across
	 * a whole sequence of operations.
	 */
	const unsigned long timeout =
		msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);

	schedule_delayed_work(&intel_dp->panel_vdd_work, timeout);
}

2367 2368 2369 2370 2371
/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
2372
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2373
{
2374
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
V
Ville Syrjälä 已提交
2375 2376 2377

	lockdep_assert_held(&dev_priv->pps_mutex);

2378
	if (!intel_dp_is_edp(intel_dp))
2379
		return;
2380

R
Rob Clark 已提交
2381
	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2382
	     port_name(dp_to_dig_port(intel_dp)->base.port));
2383

2384 2385
	intel_dp->want_panel_vdd = false;

2386
	if (sync)
2387
		edp_panel_vdd_off_sync(intel_dp);
2388 2389
	else
		edp_panel_vdd_schedule_off(intel_dp);
2390 2391
}

2392
static void edp_panel_on(struct intel_dp *intel_dp)
2393
{
2394
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2395
	u32 pp;
2396
	i915_reg_t pp_ctrl_reg;
2397

2398 2399
	lockdep_assert_held(&dev_priv->pps_mutex);

2400
	if (!intel_dp_is_edp(intel_dp))
2401
		return;
2402

V
Ville Syrjälä 已提交
2403
	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2404
		      port_name(dp_to_dig_port(intel_dp)->base.port));
V
Ville Syrjälä 已提交
2405

2406 2407
	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
2408
		 port_name(dp_to_dig_port(intel_dp)->base.port)))
2409
		return;
2410

2411
	wait_panel_power_cycle(intel_dp);
2412

2413
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2414
	pp = ironlake_get_pp_control(intel_dp);
2415
	if (IS_GEN5(dev_priv)) {
2416 2417
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
2418 2419
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
2420
	}
2421

2422
	pp |= PANEL_POWER_ON;
2423
	if (!IS_GEN5(dev_priv))
2424 2425
		pp |= PANEL_POWER_RESET;

2426 2427
	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
2428

2429
	wait_panel_on(intel_dp);
2430
	intel_dp->last_power_on = jiffies;
2431

2432
	if (IS_GEN5(dev_priv)) {
2433
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2434 2435
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
2436
	}
2437
}
V
Ville Syrjälä 已提交
2438

2439 2440
/* Locked wrapper around edp_panel_on(); no-op for non-eDP. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!intel_dp_is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}

2449 2450

static void edp_panel_off(struct intel_dp *intel_dp)
2451
{
2452
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2453
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2454
	u32 pp;
2455
	i915_reg_t pp_ctrl_reg;
2456

2457 2458
	lockdep_assert_held(&dev_priv->pps_mutex);

2459
	if (!intel_dp_is_edp(intel_dp))
2460
		return;
2461

V
Ville Syrjälä 已提交
2462
	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2463
		      port_name(dig_port->base.port));
2464

V
Ville Syrjälä 已提交
2465
	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2466
	     port_name(dig_port->base.port));
2467

2468
	pp = ironlake_get_pp_control(intel_dp);
2469 2470
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
2471
	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2472
		EDP_BLC_ENABLE);
2473

2474
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2475

2476 2477
	intel_dp->want_panel_vdd = false;

2478 2479
	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
2480

2481
	wait_panel_off(intel_dp);
2482
	intel_dp->panel_power_off_time = ktime_get_boottime();
2483 2484

	/* We got a reference when we enabled the VDD. */
2485
	intel_display_power_put(dev_priv, intel_aux_power_domain(dig_port));
2486
}
V
Ville Syrjälä 已提交
2487

2488 2489
/* Locked wrapper around edp_panel_off(); no-op for non-eDP. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!intel_dp_is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}

2498 2499
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2500
{
2501
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2502
	u32 pp;
2503
	i915_reg_t pp_ctrl_reg;
2504

2505 2506 2507 2508 2509 2510
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
2511
	wait_backlight_on(intel_dp);
V
Ville Syrjälä 已提交
2512

2513
	pps_lock(intel_dp);
V
Ville Syrjälä 已提交
2514

2515
	pp = ironlake_get_pp_control(intel_dp);
2516
	pp |= EDP_BLC_ENABLE;
2517

2518
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2519 2520 2521

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
V
Ville Syrjälä 已提交
2522

2523
	pps_unlock(intel_dp);
2524 2525
}

2526
/* Enable backlight PWM and backlight PP control. */
2527 2528
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
2529
{
2530 2531
	struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);

2532
	if (!intel_dp_is_edp(intel_dp))
2533 2534 2535 2536
		return;

	DRM_DEBUG_KMS("\n");

2537
	intel_panel_enable_backlight(crtc_state, conn_state);
2538 2539 2540 2541 2542
	_intel_edp_backlight_on(intel_dp);
}

/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2543
{
2544
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2545
	u32 pp;
2546
	i915_reg_t pp_ctrl_reg;
2547

2548
	if (!intel_dp_is_edp(intel_dp))
2549 2550
		return;

2551
	pps_lock(intel_dp);
V
Ville Syrjälä 已提交
2552

2553
	pp = ironlake_get_pp_control(intel_dp);
2554
	pp &= ~EDP_BLC_ENABLE;
2555

2556
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2557 2558 2559

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
2560

2561
	pps_unlock(intel_dp);
V
Ville Syrjälä 已提交
2562 2563

	intel_dp->last_backlight_off = jiffies;
2564
	edp_wait_backlight_off(intel_dp);
2565
}
2566

2567
/* Disable backlight PP control and backlight PWM. */
2568
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
2569
{
2570 2571
	struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);

2572
	if (!intel_dp_is_edp(intel_dp))
2573 2574 2575
		return;

	DRM_DEBUG_KMS("\n");
2576

2577
	_intel_edp_backlight_off(intel_dp);
2578
	intel_panel_disable_backlight(old_conn_state);
2579
}
2580

2581 2582 2583 2584 2585 2586 2587 2588
/*
 * Hook for controlling the panel power control backlight through the bl_power
 * sysfs attribute. Take care to handle multiple calls.
 */
static void intel_edp_backlight_power(struct intel_connector *connector,
				      bool enable)
{
	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
	bool is_enabled;

	pps_lock(intel_dp);
	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
	pps_unlock(intel_dp);

	if (is_enabled == enable)
		return;

	DRM_DEBUG_KMS("panel power control backlight %s\n",
		      enable ? "enable" : "disable");

	if (enable)
		_intel_edp_backlight_on(intel_dp);
	else
		_intel_edp_backlight_off(intel_dp);
}

2607 2608 2609 2610 2611 2612 2613 2614
/* Assert that the DP port enable bit matches the expected state. */
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;

	I915_STATE_WARN(cur_state != state,
			"DP port %c state assertion failure (expected %s, current %s)\n",
			port_name(dig_port->base.port),
			onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)

/* Assert that the eDP PLL enable bit matches the expected state. */
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;

	I915_STATE_WARN(cur_state != state,
			"eDP PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)

2631
static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
2632
				const struct intel_crtc_state *pipe_config)
2633
{
2634
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
2635
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2636

2637 2638 2639
	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);
2640

2641
	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2642
		      pipe_config->port_clock);
2643 2644 2645

	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

2646
	if (pipe_config->port_clock == 162000)
2647 2648 2649 2650 2651 2652 2653 2654
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(500);

2655 2656 2657 2658 2659 2660 2661
	/*
	 * [DevILK] Work around required when enabling DP PLL
	 * while a pipe is enabled going to FDI:
	 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
	 * 2. Program DP PLL enable
	 */
	if (IS_GEN5(dev_priv))
2662
		intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
2663

2664
	intel_dp->DP |= DP_PLL_ENABLE;
2665

2666
	I915_WRITE(DP_A, intel_dp->DP);
2667 2668
	POSTING_READ(DP_A);
	udelay(200);
2669 2670
}

2671 2672
static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *old_crtc_state)
2673
{
2674
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
2675
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2676

2677 2678 2679
	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);
2680

2681 2682
	DRM_DEBUG_KMS("disabling eDP PLL\n");

2683
	intel_dp->DP &= ~DP_PLL_ENABLE;
2684

2685
	I915_WRITE(DP_A, intel_dp->DP);
2686
	POSTING_READ(DP_A);
2687 2688 2689
	udelay(200);
}

2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704
static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
{
	/*
	 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
	 * be capable of signalling downstream hpd with a long pulse.
	 * Whether or not that means D3 is safe to use is not clear,
	 * but let's assume so until proven otherwise.
	 *
	 * FIXME should really check all downstream ports...
	 */
	return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
		intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
		intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}

2705
/* If the sink supports it, try to set the power state appropriately */
2706
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2707 2708 2709 2710 2711 2712 2713 2714
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
2715 2716 2717
		if (downstream_hpd_needs_d0(intel_dp))
			return;

2718 2719
		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
					 DP_SET_POWER_D3);
2720
	} else {
2721 2722
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

2723 2724 2725 2726 2727
		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
2728 2729
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
						 DP_SET_POWER_D0);
2730 2731 2732 2733
			if (ret == 1)
				break;
			msleep(1);
		}
2734 2735 2736

		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
2737
	}
2738 2739 2740 2741

	if (ret != 1)
		DRM_DEBUG_KMS("failed to %s sink power state\n",
			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2742 2743
}

2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789
/*
 * On CPT the pipe<->port mapping lives in TRANS_DP_CTL; scan all pipes
 * for one whose port select field matches @port. Returns true and sets
 * *pipe on a match; otherwise sets *pipe to PIPE_A and returns false.
 */
static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
				 enum port port, enum pipe *pipe)
{
	enum pipe p;

	for_each_pipe(dev_priv, p) {
		u32 val = I915_READ(TRANS_DP_CTL(p));

		if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
			*pipe = p;
			return true;
		}
	}

	DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));

	/* must initialize pipe to something for the asserts */
	*pipe = PIPE_A;

	return false;
}

/*
 * Report whether the DP port behind @dp_reg is enabled, and decode which
 * pipe it is (or would be) driving into *pipe. The pipe select field
 * location varies per platform (IVB port A / CPT PCH / CHV / others).
 */
bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
			   i915_reg_t dp_reg, enum port port,
			   enum pipe *pipe)
{
	bool ret;
	u32 val;

	val = I915_READ(dp_reg);

	ret = val & DP_PORT_EN;

	/* asserts want to know the pipe even if the port is disabled */
	if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		*pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
	else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
		ret &= cpt_dp_port_selected(dev_priv, port, pipe);
	else if (IS_CHERRYVIEW(dev_priv))
		*pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
	else
		*pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;

	return ret;
}

2790 2791
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
2792
{
2793
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2794
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2795
	bool ret;
2796

2797 2798
	if (!intel_display_power_get_if_enabled(dev_priv,
						encoder->power_domain))
2799 2800
		return false;

2801 2802
	ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
				    encoder->port, pipe);
2803

2804
	intel_display_power_put(dev_priv, encoder->power_domain);
2805 2806

	return ret;
2807
}
2808

2809
static void intel_dp_get_config(struct intel_encoder *encoder,
2810
				struct intel_crtc_state *pipe_config)
2811
{
2812
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2813 2814
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
2815
	enum port port = encoder->port;
2816
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
2817

2818 2819 2820 2821
	if (encoder->type == INTEL_OUTPUT_EDP)
		pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
	else
		pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
2822

2823
	tmp = I915_READ(intel_dp->output_reg);
2824 2825

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2826

2827
	if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
2828 2829 2830
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2831 2832 2833
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;
2834

2835
		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2836 2837 2838 2839
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
2840
		if (tmp & DP_SYNC_HS_HIGH)
2841 2842 2843
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;
2844

2845
		if (tmp & DP_SYNC_VS_HIGH)
2846 2847 2848 2849
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}
2850

2851
	pipe_config->base.adjusted_mode.flags |= flags;
2852

2853
	if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
2854 2855
		pipe_config->limited_color_range = true;

2856 2857 2858
	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

2859 2860
	intel_dp_get_m_n(crtc, pipe_config);

2861
	if (port == PORT_A) {
2862
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
2863 2864 2865 2866
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}
2867

2868 2869 2870
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(pipe_config->port_clock,
					 &pipe_config->dp_m_n);
2871

2872
	if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
2873
	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2888 2889
			      pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
2890
	}
2891 2892
}

2893
static void intel_disable_dp(struct intel_encoder *encoder,
2894 2895
			     const struct intel_crtc_state *old_crtc_state,
			     const struct drm_connector_state *old_conn_state)
2896
{
2897
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2898

2899 2900
	intel_dp->link_trained = false;

2901
	if (old_crtc_state->has_audio)
2902 2903
		intel_audio_codec_disable(encoder,
					  old_crtc_state, old_conn_state);
2904 2905 2906

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
2907
	intel_edp_panel_vdd_on(intel_dp);
2908
	intel_edp_backlight_off(old_conn_state);
2909
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2910
	intel_edp_panel_off(intel_dp);
2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924
}

/* g4x encoder ->disable() hook: the common DP disable sequence suffices. */
static void g4x_disable_dp(struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}

/* VLV encoder ->disable() hook: the common DP disable sequence suffices. */
static void vlv_disable_dp(struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}

2927
static void g4x_post_disable_dp(struct intel_encoder *encoder,
2928 2929
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
2930
{
2931
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2932
	enum port port = encoder->port;
2933

2934 2935 2936 2937 2938 2939
	/*
	 * Bspec does not list a specific disable sequence for g4x DP.
	 * Follow the ilk+ sequence (disable pipe before the port) for
	 * g4x DP as it does not suffer from underruns like the normal
	 * g4x modeset sequence (disable pipe after the port).
	 */
2940
	intel_dp_link_down(encoder, old_crtc_state);
2941 2942

	/* Only ilk+ has port A */
2943
	if (port == PORT_A)
2944
		ironlake_edp_pll_off(intel_dp, old_crtc_state);
2945 2946
}

2947
/* VLV post-disable: just take the DP link down. */
static void vlv_post_disable_dp(struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	intel_dp_link_down(encoder, old_crtc_state);
}

2954
static void chv_post_disable_dp(struct intel_encoder *encoder,
2955 2956
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
2957
{
2958
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2959

2960
	intel_dp_link_down(encoder, old_crtc_state);
2961 2962 2963 2964

	mutex_lock(&dev_priv->sb_lock);

	/* Assert data lane reset */
2965
	chv_data_lane_soft_reset(encoder, old_crtc_state, true);
2966

V
Ville Syrjälä 已提交
2967
	mutex_unlock(&dev_priv->sb_lock);
2968 2969
}

2970 2971 2972 2973 2974
/*
 * Translate a DPCD training pattern value (plus scrambling flag) into the
 * platform-specific port/DP_TP_CTL register bits.
 *
 * @DP: in/out copy of the DP port register value; updated in place on
 *      non-DDI platforms (the caller writes it to the hardware).
 * @dp_train_pat: DP_TRAINING_PATTERN_* possibly ORed with
 *      DP_LINK_SCRAMBLING_DISABLE.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;
	uint8_t train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);

	if (dp_train_pat & train_pat_mask)
		DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
			      dp_train_pat & train_pat_mask);

	if (HAS_DDI(dev_priv)) {
		/* DDI platforms program the pattern via DP_TP_CTL instead. */
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & train_pat_mask) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		case DP_TRAINING_PATTERN_4:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* CPT-style ports have no TPS3 encoding. */
			DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* g4x-style ports have no TPS3 encoding either. */
			DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}
}

3054
static void intel_dp_enable_port(struct intel_dp *intel_dp,
3055
				 const struct intel_crtc_state *old_crtc_state)
3056
{
3057
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3058 3059 3060

	/* enable with pattern 1 (as per spec) */

3061
	intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);
3062 3063 3064 3065 3066 3067 3068 3069

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
3070
	if (old_crtc_state->has_audio)
3071
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
3072 3073 3074

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
3075 3076
}

3077
static void intel_enable_dp(struct intel_encoder *encoder,
3078 3079
			    const struct intel_crtc_state *pipe_config,
			    const struct drm_connector_state *conn_state)
3080
{
3081
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3082
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3083
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
3084
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
3085
	enum pipe pipe = crtc->pipe;
3086

3087 3088
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;
3089

3090 3091
	pps_lock(intel_dp);

3092
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3093
		vlv_init_panel_power_sequencer(encoder, pipe_config);
3094

3095
	intel_dp_enable_port(intel_dp, pipe_config);
3096 3097 3098 3099 3100 3101 3102

	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	pps_unlock(intel_dp);

3103
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3104 3105
		unsigned int lane_mask = 0x0;

3106
		if (IS_CHERRYVIEW(dev_priv))
3107
			lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
3108

3109 3110
		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
3111
	}
3112

3113
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
3114
	intel_dp_start_link_train(intel_dp);
3115
	intel_dp_stop_link_train(intel_dp);
3116

3117
	if (pipe_config->has_audio) {
3118
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
3119
				 pipe_name(pipe));
3120
		intel_audio_codec_enable(encoder, pipe_config, conn_state);
3121
	}
3122
}
3123

3124
/* g4x ->enable hook: enable the port, then the (eDP) backlight. */
static void g4x_enable_dp(struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_enable_dp(encoder, pipe_config, conn_state);
	intel_edp_backlight_on(pipe_config, conn_state);
}
3131

3132
/*
 * VLV ->enable hook: the port itself is enabled from ->pre_enable
 * (vlv_pre_enable_dp), so only the backlight remains here.
 */
static void vlv_enable_dp(struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_edp_backlight_on(pipe_config, conn_state);
}

3139
static void g4x_pre_enable_dp(struct intel_encoder *encoder,
3140 3141
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
3142 3143
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3144
	enum port port = encoder->port;
3145

3146
	intel_dp_prepare(encoder, pipe_config);
3147

3148
	/* Only ilk+ has port A */
3149
	if (port == PORT_A)
3150
		ironlake_edp_pll_on(intel_dp, pipe_config);
3151 3152
}

3153 3154 3155
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3156
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
3157
	enum pipe pipe = intel_dp->pps_pipe;
3158
	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
3159

3160 3161
	WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

3162 3163 3164
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

3165 3166 3167
	edp_panel_vdd_off_sync(intel_dp);

	/*
3168
	 * VLV seems to get confused when multiple power sequencers
3169 3170 3171
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing
	 * CHV on the other hand doesn't seem to mind having the same port
3172
	 * selected in multiple power sequencers, but let's clear the
3173 3174 3175 3176
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
3177
		      pipe_name(pipe), port_name(intel_dig_port->base.port));
3178 3179 3180 3181 3182 3183
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}

3184
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
3185 3186 3187 3188 3189 3190
				      enum pipe pipe)
{
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

3191 3192 3193
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
		enum port port = encoder->port;
3194

3195 3196 3197 3198
		WARN(intel_dp->active_pipe == pipe,
		     "stealing pipe %c power sequencer from active (e)DP port %c\n",
		     pipe_name(pipe), port_name(port));

3199 3200 3201 3202
		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
3203
			      pipe_name(pipe), port_name(port));
3204 3205

		/* make sure vdd is off before we steal it */
3206
		vlv_detach_power_sequencer(intel_dp);
3207 3208 3209
	}
}

3210 3211
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state)
3212
{
3213
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3214 3215
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3216 3217 3218

	lockdep_assert_held(&dev_priv->pps_mutex);

3219
	WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
3220

3221 3222 3223 3224 3225 3226 3227
	if (intel_dp->pps_pipe != INVALID_PIPE &&
	    intel_dp->pps_pipe != crtc->pipe) {
		/*
		 * If another power sequencer was being used on this
		 * port previously make sure to turn off vdd there while
		 * we still have control of it.
		 */
3228
		vlv_detach_power_sequencer(intel_dp);
3229
	}
3230 3231 3232 3233 3234

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
3235
	vlv_steal_power_sequencer(dev_priv, crtc->pipe);
3236

3237 3238
	intel_dp->active_pipe = crtc->pipe;

3239
	if (!intel_dp_is_edp(intel_dp))
3240 3241
		return;

3242 3243 3244 3245
	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
3246
		      pipe_name(intel_dp->pps_pipe), port_name(encoder->port));
3247 3248

	/* init power sequencer on this pipe and port */
3249 3250
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
3251 3252
}

3253
/* VLV ->pre_enable hook: bring up the PHY, then enable the port. */
static void vlv_pre_enable_dp(struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	vlv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(encoder, pipe_config, conn_state);
}

3262
/* VLV ->pre_pll_enable hook: program the port, then prep the PHY PLL. */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	vlv_phy_pre_pll_enable(encoder, pipe_config);
}

3271
/* CHV ->pre_enable hook: PHY bring-up, port enable, then drop the CL2 hold. */
static void chv_pre_enable_dp(struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	chv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(encoder, pipe_config, conn_state);

	/* Second common lane will stay alive on its own now */
	chv_phy_release_cl2_override(encoder);
}

3283
/* CHV ->pre_pll_enable hook: program the port, then prep the PHY PLL. */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	chv_phy_pre_pll_enable(encoder, pipe_config);
}

3292
/* CHV ->post_pll_disable hook: tear down the PHY after the PLL is off. */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state)
{
	chv_phy_post_pll_disable(encoder, old_crtc_state);
}

3299 3300 3301 3302
/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information
 */
3303
bool
3304
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3305
{
3306 3307
	return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
				DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3308 3309
}

3310
/* These are source-specific values. */
3311
uint8_t
K
Keith Packard 已提交
3312
intel_dp_voltage_max(struct intel_dp *intel_dp)
3313
{
3314
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3315 3316
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum port port = encoder->port;
K
Keith Packard 已提交
3317

3318
	if (HAS_DDI(dev_priv))
3319
		return intel_ddi_dp_voltage_max(encoder);
3320
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3321
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3322
	else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3323
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3324
	else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3325
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
K
Keith Packard 已提交
3326
	else
3327
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
K
Keith Packard 已提交
3328 3329
}

3330
/*
 * Maximum pre-emphasis level the source supports for the given
 * voltage swing on this port.
 */
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum port port = encoder->port;

	if (HAS_DDI(dev_priv)) {
		return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}

3376
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3377
{
3378
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3379 3380 3381 3382 3383
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3384
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3385 3386
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3387
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3388 3389 3390
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
3391
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3392 3393 3394
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
3395
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3396 3397 3398
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
3399
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3400 3401 3402 3403 3404 3405 3406
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
3407
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3408 3409
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3410
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3411 3412 3413
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
3414
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3415 3416 3417
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
3418
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3419 3420 3421 3422 3423 3424 3425
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
3426
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3427 3428
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3429
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3430 3431 3432
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
3433
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3434 3435 3436 3437 3438 3439 3440
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
3441
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3442 3443
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3444
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

3456 3457
	vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
				 uniqtranscale_reg_value, 0);
3458 3459 3460 3461

	return 0;
}

3462
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3463
{
3464 3465 3466
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u32 deemph_reg_value, margin_reg_value;
	bool uniq_trans_scale = false;
3467 3468 3469
	uint8_t train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3470
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3471
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3472
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3473 3474 3475
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
3476
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3477 3478 3479
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
3480
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3481 3482 3483
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
3484
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3485 3486
			deemph_reg_value = 128;
			margin_reg_value = 154;
3487
			uniq_trans_scale = true;
3488 3489 3490 3491 3492
			break;
		default:
			return 0;
		}
		break;
3493
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3494
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3495
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3496 3497 3498
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
3499
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3500 3501 3502
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
3503
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3504 3505 3506 3507 3508 3509 3510
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
3511
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3512
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3513
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3514 3515 3516
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
3517
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3518 3519 3520 3521 3522 3523 3524
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
3525
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3526
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3527
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

3539 3540
	chv_set_phy_signal_level(encoder, deemph_reg_value,
				 margin_reg_value, uniq_trans_scale);
3541 3542 3543 3544

	return 0;
}

3545
static uint32_t
3546
g4x_signal_levels(uint8_t train_set)
3547
{
3548
	uint32_t	signal_levels = 0;
3549

3550
	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3551
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3552 3553 3554
	default:
		signal_levels |= DP_VOLTAGE_0_4;
		break;
3555
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3556 3557
		signal_levels |= DP_VOLTAGE_0_6;
		break;
3558
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3559 3560
		signal_levels |= DP_VOLTAGE_0_8;
		break;
3561
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3562 3563 3564
		signal_levels |= DP_VOLTAGE_1_2;
		break;
	}
3565
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3566
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
3567 3568 3569
	default:
		signal_levels |= DP_PRE_EMPHASIS_0;
		break;
3570
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
3571 3572
		signal_levels |= DP_PRE_EMPHASIS_3_5;
		break;
3573
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
3574 3575
		signal_levels |= DP_PRE_EMPHASIS_6;
		break;
3576
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
3577 3578 3579 3580 3581 3582
		signal_levels |= DP_PRE_EMPHASIS_9_5;
		break;
	}
	return signal_levels;
}

3583
/* SNB CPU eDP voltage swing and pre-emphasis control */
3584
static uint32_t
3585
snb_cpu_edp_signal_levels(uint8_t train_set)
3586
{
3587 3588 3589
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
3590 3591
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3592
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3593
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3594
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3595 3596
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3597
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3598 3599
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3600
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3601 3602
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3603
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3604
	default:
3605 3606 3607
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3608 3609 3610
	}
}

3611
/* IVB CPU eDP voltage swing and pre-emphasis control */
K
Keith Packard 已提交
3612
static uint32_t
3613
ivb_cpu_edp_signal_levels(uint8_t train_set)
K
Keith Packard 已提交
3614 3615 3616 3617
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
3618
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
K
Keith Packard 已提交
3619
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
3620
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
K
Keith Packard 已提交
3621
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3622
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
K
Keith Packard 已提交
3623 3624
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

3625
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
K
Keith Packard 已提交
3626
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
3627
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
K
Keith Packard 已提交
3628 3629
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

3630
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
K
Keith Packard 已提交
3631
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
3632
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
K
Keith Packard 已提交
3633 3634 3635 3636 3637 3638 3639 3640 3641
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}

3642
void
3643
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3644
{
3645
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3646
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3647
	enum port port = intel_dig_port->base.port;
3648
	uint32_t signal_levels, mask = 0;
3649 3650
	uint8_t train_set = intel_dp->train_set[0];

R
Rodrigo Vivi 已提交
3651
	if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
3652 3653
		signal_levels = bxt_signal_levels(intel_dp);
	} else if (HAS_DDI(dev_priv)) {
3654
		signal_levels = ddi_signal_levels(intel_dp);
3655
		mask = DDI_BUF_EMP_MASK;
3656
	} else if (IS_CHERRYVIEW(dev_priv)) {
3657
		signal_levels = chv_signal_levels(intel_dp);
3658
	} else if (IS_VALLEYVIEW(dev_priv)) {
3659
		signal_levels = vlv_signal_levels(intel_dp);
3660
	} else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
3661
		signal_levels = ivb_cpu_edp_signal_levels(train_set);
3662
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3663
	} else if (IS_GEN6(dev_priv) && port == PORT_A) {
3664
		signal_levels = snb_cpu_edp_signal_levels(train_set);
3665 3666
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
3667
		signal_levels = g4x_signal_levels(train_set);
3668 3669 3670
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

3671 3672 3673 3674 3675 3676 3677 3678
	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	DRM_DEBUG_KMS("Using vswing level %d\n",
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT);
3679

3680
	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3681 3682 3683

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
3684 3685
}

3686
void
3687 3688
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       uint8_t dp_train_pat)
3689
{
3690
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3691 3692
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);
3693

3694
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3695

3696
	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
C
Chris Wilson 已提交
3697
	POSTING_READ(intel_dp->output_reg);
3698 3699
}

3700
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3701
{
3702
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3703
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3704
	enum port port = intel_dig_port->base.port;
3705 3706
	uint32_t val;

3707
	if (!HAS_DDI(dev_priv))
3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

3725 3726 3727 3728
	if (intel_wait_for_register(dev_priv,DP_TP_STATUS(port),
				    DP_TP_STATUS_IDLE_DONE,
				    DP_TP_STATUS_IDLE_DONE,
				    1))
3729 3730 3731
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}

3732
static void
3733 3734
intel_dp_link_down(struct intel_encoder *encoder,
		   const struct intel_crtc_state *old_crtc_state)
3735
{
3736 3737 3738 3739
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	enum port port = encoder->port;
C
Chris Wilson 已提交
3740
	uint32_t DP = intel_dp->DP;
3741

3742
	if (WARN_ON(HAS_DDI(dev_priv)))
3743 3744
		return;

3745
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3746 3747
		return;

3748
	DRM_DEBUG_KMS("\n");
3749

3750
	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
3751
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
3752
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
3753
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3754
	} else {
3755
		DP &= ~DP_LINK_TRAIN_MASK;
3756
		DP |= DP_LINK_TRAIN_PAT_IDLE;
3757
	}
3758
	I915_WRITE(intel_dp->output_reg, DP);
3759
	POSTING_READ(intel_dp->output_reg);
3760

3761 3762 3763 3764 3765 3766 3767 3768 3769
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
3770
	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
3771 3772 3773 3774 3775 3776 3777
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

3778
		/* always enable with pattern 1 (as per spec) */
3779 3780 3781
		DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
			DP_LINK_TRAIN_PAT_1;
3782 3783 3784 3785
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
3786
		I915_WRITE(intel_dp->output_reg, DP);
3787
		POSTING_READ(intel_dp->output_reg);
3788

3789
		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
3790 3791
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3792 3793
	}

3794
	msleep(intel_dp->panel_power_down_delay);
3795 3796

	intel_dp->DP = DP;
3797 3798 3799 3800 3801 3802

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		pps_lock(intel_dp);
		intel_dp->active_pipe = INVALID_PIPE;
		pps_unlock(intel_dp);
	}
3803 3804
}

3805
bool
3806
intel_dp_read_dpcd(struct intel_dp *intel_dp)
3807
{
3808 3809
	if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
			     sizeof(intel_dp->dpcd)) < 0)
3810
		return false; /* aux transfer failed */
3811

3812
	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3813

3814 3815
	return intel_dp->dpcd[DP_DPCD_REV] != 0;
}
3816

3817 3818 3819 3820 3821 3822 3823 3824
static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
	/*
	 * Clear the cached register set to avoid using stale values
	 * for the sinks that do not support DSC.
	 */
	memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

3825 3826 3827
	/* Clear fec_capable to avoid using stale values */
	intel_dp->fec_capable = 0;

3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839
	/* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
	    intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
				     intel_dp->dsc_dpcd,
				     sizeof(intel_dp->dsc_dpcd)) < 0)
			DRM_ERROR("Failed to read DPCD register 0x%x\n",
				  DP_DSC_SUPPORT);

		DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
			      (int)sizeof(intel_dp->dsc_dpcd),
			      intel_dp->dsc_dpcd);
3840 3841 3842 3843 3844 3845 3846 3847 3848
		/* FEC is supported only on DP 1.4 */
		if (!intel_dp_is_edp(intel_dp)) {
			if (drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
					      &intel_dp->fec_capable) < 0)
				DRM_ERROR("Failed to read FEC DPCD register\n");

		DRM_DEBUG_KMS("FEC CAPABILITY: %x\n",
			      intel_dp->fec_capable);
		}
3849 3850 3851
	}
}

3852 3853 3854 3855 3856
/*
 * One-time DPCD initialization for eDP panels.
 *
 * Reads the base DPCD, the eDP display control registers, and the eDP 1.4+
 * supported link-rate table, then derives the source/sink common rates.
 * Returns false if the base DPCD read fails (panel considered absent).
 */
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	/* DPCD 1.1+ exposes the no-AUX-handshake training capability. */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
		dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
			DP_NO_AUX_HANDSHAKE_LINK_TRAINING;

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
			     sizeof(intel_dp->edp_dpcd))
		DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
			      intel_dp->edp_dpcd);

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				sink_rates, sizeof(sink_rates));

		/* The table is zero-terminated; stop at the first empty slot. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		intel_dp_get_dsc_sink_cap(intel_dp);

	return true;
}


/*
 * Re-read the DPCD and downstream-port state of an already-initialized
 * sink (used on detect and on short HPD pulses).
 *
 * Returns false when the sink looks gone or unreliable (AUX failure, or a
 * branch device reporting zero downstream sinks), true otherwise.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	u8 sink_count;

	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	/* Don't clobber cached eDP rates. */
	if (!intel_dp_is_edp(intel_dp)) {
		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &sink_count) <= 0)
		return false;

	/*
	 * Sink count can change between short pulse hpd hence
	 * a member variable in intel_dp will track any changes
	 * between short pulse interrupts.
	 */
	intel_dp->sink_count = DP_GET_SINK_COUNT(sink_count);

	/*
	 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
	 * a dongle is present but no display. Unless we require to know
	 * if a dongle is present or not, we don't need to update
	 * downstream port information. So, an early return here saves
	 * time from performing other operations which are not required.
	 */
	if (!intel_dp_is_edp(intel_dp) && !intel_dp->sink_count)
		return false;

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
			     intel_dp->downstream_ports,
			     DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}

static bool
3985
intel_dp_sink_can_mst(struct intel_dp *intel_dp)
3986
{
3987
	u8 mstm_cap;
3988 3989 3990 3991

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
		return false;

3992
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
3993
		return false;
3994

3995
	return mstm_cap & DP_MST_CAP;
3996 3997
}

3998 3999 4000 4001 4002 4003 4004 4005
static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
	return i915_modparams.enable_dp_mst &&
		intel_dp->can_mst &&
		intel_dp_sink_can_mst(intel_dp);
}

4006 4007 4008
static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
4009 4010 4011 4012 4013 4014 4015
	struct intel_encoder *encoder =
		&dp_to_dig_port(intel_dp)->base;
	bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);

	DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
		      port_name(encoder->port), yesno(intel_dp->can_mst),
		      yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));
4016 4017 4018 4019

	if (!intel_dp->can_mst)
		return;

4020 4021
	intel_dp->is_mst = sink_can_mst &&
		i915_modparams.enable_dp_mst;
4022 4023 4024

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
					intel_dp->is_mst);
4025 4026 4027 4028 4029
}

/* Read the full ESI block from the sink; true only on a complete read. */
static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	ssize_t ret;

	ret = drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
			       sink_irq_vector, DP_DPRX_ESI_LEN);

	return ret == DP_DPRX_ESI_LEN;
}

/*
 * Compute the compressed output bpp for DSC, in U6.4 fixed point.
 *
 * Takes the minimum of the bpp allowed by the available link bandwidth and
 * the bpp allowed by the small-joiner RAM, then snaps down to the nearest
 * entry of the file-scope valid_dsc_bpp[] table of VESA-valid BPPs.
 * Returns 0 if even the smallest valid bpp cannot be sustained.
 */
u16 intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
				int mode_clock, int mode_hdisplay)
{
	u16 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * ((100-FECOverhead)/100)*(TimeSlotsPerMTP)
	 * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8 *
			  DP_DSC_FEC_OVERHEAD_FACTOR) /
		mode_clock;

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
		mode_hdisplay;

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from avaialble Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
		return 0;
	}

	/* Find the nearest match in the array of known BPPs from VESA */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
		if (bits_per_pixel < valid_dsc_bpp[i + 1])
			break;
	}
	bits_per_pixel = valid_dsc_bpp[i];

	/*
	 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
	 * fractional part is 0
	 */
	return bits_per_pixel << 4;
}

/*
 * Pick the smallest valid DSC slice count that satisfies both the encoder
 * throughput limit (per pixel clock) and the sink's maximum slice width.
 *
 * Returns a value from the file-scope valid_dsc_slicecount[] table, or 0
 * when no supported slice count works for this mode/sink combination.
 */
u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				int mode_clock,
				int mode_hdisplay)
{
	u8 min_slice_count, i;
	int max_slice_width;

	/* Slices must be numerous enough to keep per-slice throughput in range. */
	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
			      max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = min_t(uint8_t, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		/* Stop once the candidate exceeds what the sink supports. */
		if (valid_dsc_slicecount[i] >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
						    false))
			break;
		if (min_slice_count  <= valid_dsc_slicecount[i])
			return valid_dsc_slicecount[i];
	}

	DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
	return 0;
}

/*
 * Handle a DP compliance link-training test request: read the requested
 * lane count and link rate from the sink, validate them against what this
 * source/sink pair supports, and stash them for the subsequent modeset.
 * Returns DP_TEST_ACK or DP_TEST_NAK.
 */
static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	int status = 0;
	int test_link_rate;
	uint8_t test_lane_count, test_link_bw;
	/* (DP CTS 1.2)
	 * 4.3.1.11
	 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		DRM_DEBUG_KMS("Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		DRM_DEBUG_KMS("Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	/* Convert the BW code from the sink into a link rate in kHz. */
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);

	/* Validate the requested link rate and lane count */
	if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
					test_lane_count))
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}

/*
 * Handle a DP compliance video-pattern test request: read the pattern,
 * geometry and color format from the sink, accept only the RGB color-ramp
 * case, and record the parameters for userspace to act on.
 * Returns DP_TEST_ACK or DP_TEST_NAK.
 */
static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	uint8_t test_pattern;
	uint8_t test_misc;
	__be16 h_width, v_height;
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		DRM_DEBUG_KMS("Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	/* Only the color-ramp pattern is supported. */
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		DRM_DEBUG_KMS("H Width read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		DRM_DEBUG_KMS("V Height read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		DRM_DEBUG_KMS("TEST MISC read failed\n");
		return DP_TEST_NAK;
	}
	/* Only RGB, VESA-range, 6/8 bpc requests are handled. */
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	/* DPCD geometry fields are big-endian. */
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = 1;

	return DP_TEST_ACK;
}

/*
 * Handle a DP compliance EDID-read test request.
 *
 * If the preceding EDID read saw NACKs/DEFERs or corruption, report the
 * failsafe resolution; otherwise write back the checksum of the last EDID
 * block and report the preferred resolution. Always marks the test active
 * so userspace doesn't interfere. Returns DP_TEST_ACK, optionally OR'd
 * with DP_TEST_EDID_CHECKSUM_WRITE.
 */
static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	uint8_t test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
			intel_dp->aux.i2c_defer_count > 0)
			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
				      intel_dp->aux.i2c_nack_count,
				      intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum
		 * of the last block read
		 */
		block += intel_connector->detect_edid->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			DRM_DEBUG_KMS("Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = 1;

	return test_result;
}

static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4263
{
4264 4265 4266 4267 4268 4269 4270
	uint8_t test_result = DP_TEST_NAK;
	return test_result;
}

/*
 * Read DP_TEST_REQUEST from the sink, dispatch to the matching autotest
 * handler, and write the ACK/NAK result back to DP_TEST_RESPONSE. On ACK
 * the request type is cached so later code (e.g. short-pulse handling)
 * knows which compliance test is in flight.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	uint8_t response = DP_TEST_NAK;
	uint8_t request = 0;
	int status;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
	if (status <= 0) {
		DRM_DEBUG_KMS("Could not read test request from sink\n");
		goto update_status;
	}

	switch (request) {
	case DP_TEST_LINK_TRAINING:
		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		DRM_DEBUG_KMS("EDID test requested\n");
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
		break;
	}

	/* Remember which test was accepted for later short-pulse handling. */
	if (response & DP_TEST_ACK)
		intel_dp->compliance.test_type = request;

update_status:
	status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
	if (status <= 0)
		DRM_DEBUG_KMS("Could not write test response to sink\n");
}

/*
 * Service an MST sink interrupt: read the ESI block, retrain if channel EQ
 * dropped, let the MST topology manager handle the IRQ, and ack the ESI
 * bits back to the sink (retrying the ack write up to 3 times). Loops via
 * go_again while the sink keeps reporting events. If the ESI read fails,
 * MST is torn down and a hotplug event is sent so userspace re-probes.
 * Returns the topology manager's result, 0, or -EINVAL.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[DP_DPRX_ESI_LEN] = { 0 };
		int ret = 0;
		int retry;
		bool handled;

		WARN_ON_ONCE(intel_dp->active_mst_links < 0);
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links > 0 &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced ESI bits; retry short writes. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* Keep draining events until the sink is quiet. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}

/*
 * Decide whether the trained link has degraded and needs retraining.
 * Only meaningful for a link we actually trained; a failed status read or
 * stale cached link parameters mean "no" (full detection handles those).
 */
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->link_trained)
		return false;

	if (!intel_dp_get_link_status(intel_dp, link_status))
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return false;

	/* Retrain if Channel EQ or CR not ok */
	return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}

/*
 * Retrain the DP link under the modeset locks provided by @ctx.
 *
 * Bails out (returning 0) whenever retraining is unnecessary or unsafe:
 * connector not connected, no active CRTC, a commit still in flight, or
 * the link status says training is fine. May return -EDEADLK from the
 * lock acquisitions, in which case the caller must back off and retry.
 * FIFO underrun reporting is suppressed across the retrain because the
 * retrain itself is expected to cause transient underruns.
 */
int intel_dp_retrain_link(struct intel_encoder *encoder,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_connector_state *conn_state;
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int ret;

	/* FIXME handle the MST connectors as well */

	if (!connector || connector->base.status != connector_status_connected)
		return 0;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	conn_state = connector->base.state;

	crtc = to_intel_crtc(conn_state->crtc);
	if (!crtc)
		return 0;

	ret = drm_modeset_lock(&crtc->base.mutex, ctx);
	if (ret)
		return ret;

	crtc_state = to_intel_crtc_state(crtc->base.state);

	WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));

	if (!crtc_state->base.active)
		return 0;

	/* Don't race with a commit that is still being applied. */
	if (conn_state->commit &&
	    !try_wait_for_completion(&conn_state->commit->hw_done))
		return 0;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	/* Suppress underruns caused by re-training */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
	if (crtc_state->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), false);

	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	/* Keep underrun reporting disabled until things are stable */
	intel_wait_for_vblank(dev_priv, crtc->pipe);

	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
	if (crtc_state->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), true);

	return 0;
}

/*
 * If display is now connected check links status,
 * there has been known issues of link loss triggering
 * long pulse.
 *
 * Some sinks (eg. ASUS PB287Q) seem to perform some
 * weird HPD ping pong during modesets. So we can apparently
 * end up with HPD going low during a modeset, and then
 * going back up soon after. And once that happens we must
 * retrain the link to get a picture. That's in case no
 * userspace component reacted to intermittent HPD dip.
 */
static bool intel_dp_hotplug(struct intel_encoder *encoder,
			     struct intel_connector *connector)
{
	struct drm_modeset_acquire_ctx ctx;
	bool changed;
	int ret;

	/* Run the generic hotplug handling first. */
	changed = intel_encoder_hotplug(encoder, connector);

	drm_modeset_acquire_init(&ctx, 0);

	/* Retry the retrain on lock contention (-EDEADLK) until it settles. */
	for (;;) {
		ret = intel_dp_retrain_link(encoder, &ctx);

		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			continue;
		}

		break;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	WARN(ret, "Acquiring modeset locks failed with %i\n", ret);

	return changed;
}

/*
 * Read, acknowledge and dispatch the sink's DEVICE_SERVICE_IRQ_VECTOR:
 * automated compliance tests, HDCP CP_IRQ, and (unhandled) sink-specific
 * IRQs. Only meaningful on DPCD 1.1+ sinks.
 */
static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
{
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
		return;

	/* Ack the IRQ bits back to the sink before handling them. */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	if (val & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_handle_test_request(intel_dp);

	if (val & DP_CP_IRQ)
		intel_hdcp_check_link(intel_dp->attached_connector);

	if (val & DP_SINK_SPECIFIC_IRQ)
		DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
}

/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse -  handles short pulse interrupts
 * when full detection is not required.
 * Returns %true if short pulse is handled and full detection
 * is NOT required and %false otherwise.
 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	intel_dp_check_service_irq(intel_dp);

	/* Handle CEC interrupts, if any */
	drm_dp_cec_irq(&intel_dp->aux);

	/* defer to the hotplug work for link retraining if needed */
	if (intel_dp_needs_link_retrain(intel_dp))
		return false;

	intel_psr_short_pulse(intel_dp);

	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
	}

	return true;
}

/* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine connector status purely from DPCD state: eDP is always
 * connected; branch devices are judged by SINK_COUNT / downstream port
 * type; as a last resort the DDC bus is probed gently.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	/* Bring an active LSPCON adaptor back up before touching DPCD. */
	if (lspcon->active)
		lspcon_resume(lspcon);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* eDP panels are fixed; always connected. */
	if (intel_dp_is_edp(intel_dp))
		return connector_status_connected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {

		return intel_dp->sink_count ?
		connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}

/*
 * eDP panels are fixed, always-present displays, so "detection" reports
 * connected unconditionally.
 */
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	return connector_status_connected;
}

static bool ibx_digital_port_connected(struct intel_encoder *encoder)
4643
{
4644
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4645
	u32 bit;
4646

4647 4648
	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
4649 4650
		bit = SDE_PORTB_HOTPLUG;
		break;
4651
	case HPD_PORT_C:
4652 4653
		bit = SDE_PORTC_HOTPLUG;
		break;
4654
	case HPD_PORT_D:
4655 4656 4657
		bit = SDE_PORTD_HOTPLUG;
		break;
	default:
4658
		MISSING_CASE(encoder->hpd_pin);
4659 4660 4661 4662 4663 4664
		return false;
	}

	return I915_READ(SDEISR) & bit;
}

4665
static bool cpt_digital_port_connected(struct intel_encoder *encoder)
4666
{
4667
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4668 4669
	u32 bit;

4670 4671
	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
4672 4673
		bit = SDE_PORTB_HOTPLUG_CPT;
		break;
4674
	case HPD_PORT_C:
4675 4676
		bit = SDE_PORTC_HOTPLUG_CPT;
		break;
4677
	case HPD_PORT_D:
4678 4679
		bit = SDE_PORTD_HOTPLUG_CPT;
		break;
4680
	default:
4681
		MISSING_CASE(encoder->hpd_pin);
4682 4683 4684 4685 4686 4687
		return false;
	}

	return I915_READ(SDEISR) & bit;
}

4688
static bool spt_digital_port_connected(struct intel_encoder *encoder)
4689
{
4690
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4691 4692
	u32 bit;

4693 4694
	switch (encoder->hpd_pin) {
	case HPD_PORT_A:
4695 4696
		bit = SDE_PORTA_HOTPLUG_SPT;
		break;
4697
	case HPD_PORT_E:
4698 4699
		bit = SDE_PORTE_HOTPLUG_SPT;
		break;
4700
	default:
4701
		return cpt_digital_port_connected(encoder);
4702
	}
4703

4704
	return I915_READ(SDEISR) & bit;
4705 4706
}

4707
static bool g4x_digital_port_connected(struct intel_encoder *encoder)
4708
{
4709
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4710
	u32 bit;
4711

4712 4713
	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
4714 4715
		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
		break;
4716
	case HPD_PORT_C:
4717 4718
		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
		break;
4719
	case HPD_PORT_D:
4720 4721 4722
		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
		break;
	default:
4723
		MISSING_CASE(encoder->hpd_pin);
4724 4725 4726 4727 4728 4729
		return false;
	}

	return I915_READ(PORT_HOTPLUG_STAT) & bit;
}

4730
static bool gm45_digital_port_connected(struct intel_encoder *encoder)
4731
{
4732
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4733 4734
	u32 bit;

4735 4736
	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
4737
		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4738
		break;
4739
	case HPD_PORT_C:
4740
		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4741
		break;
4742
	case HPD_PORT_D:
4743
		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4744 4745
		break;
	default:
4746
		MISSING_CASE(encoder->hpd_pin);
4747
		return false;
4748 4749
	}

4750
	return I915_READ(PORT_HOTPLUG_STAT) & bit;
4751 4752
}

4753
static bool ilk_digital_port_connected(struct intel_encoder *encoder)
4754
{
4755 4756 4757
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	if (encoder->hpd_pin == HPD_PORT_A)
4758 4759
		return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
	else
4760
		return ibx_digital_port_connected(encoder);
4761 4762
}

4763
static bool snb_digital_port_connected(struct intel_encoder *encoder)
4764
{
4765 4766 4767
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	if (encoder->hpd_pin == HPD_PORT_A)
4768 4769
		return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
	else
4770
		return cpt_digital_port_connected(encoder);
4771 4772
}

4773
static bool ivb_digital_port_connected(struct intel_encoder *encoder)
4774
{
4775 4776 4777
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	if (encoder->hpd_pin == HPD_PORT_A)
4778 4779
		return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
	else
4780
		return cpt_digital_port_connected(encoder);
4781 4782
}

4783
static bool bdw_digital_port_connected(struct intel_encoder *encoder)
4784
{
4785 4786 4787
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	if (encoder->hpd_pin == HPD_PORT_A)
4788 4789
		return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
	else
4790
		return cpt_digital_port_connected(encoder);
4791 4792
}

4793
static bool bxt_digital_port_connected(struct intel_encoder *encoder)
4794
{
4795
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4796 4797
	u32 bit;

4798 4799
	switch (encoder->hpd_pin) {
	case HPD_PORT_A:
4800 4801
		bit = BXT_DE_PORT_HP_DDIA;
		break;
4802
	case HPD_PORT_B:
4803 4804
		bit = BXT_DE_PORT_HP_DDIB;
		break;
4805
	case HPD_PORT_C:
4806 4807 4808
		bit = BXT_DE_PORT_HP_DDIC;
		break;
	default:
4809
		MISSING_CASE(encoder->hpd_pin);
4810 4811 4812 4813 4814 4815
		return false;
	}

	return I915_READ(GEN8_DE_PORT_ISR) & bit;
}

4816 4817 4818 4819 4820 4821 4822 4823
/* Live HPD state for an ICL combo-PHY DDI port, from the south ISR. */
static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
				     struct intel_digital_port *intel_dig_port)
{
	return I915_READ(SDEISR) &
		SDE_DDI_HOTPLUG_ICP(intel_dig_port->base.port);
}

/*
 * Record the Type-C port flavor (legacy / typec / thunderbolt) detected
 * for this digital port. Exactly one of the three flags must be set;
 * a change of type at runtime is unexpected and triggers a WARN.
 */
static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
				    struct intel_digital_port *intel_dig_port,
				    bool is_legacy, bool is_typec, bool is_tbt)
{
	enum port port = intel_dig_port->base.port;
	enum tc_port_type old_type = intel_dig_port->tc_type;
	const char *type_str;

	/* Callers must pass exactly one type flag. */
	WARN_ON(is_legacy + is_typec + is_tbt != 1);

	if (is_legacy) {
		intel_dig_port->tc_type = TC_PORT_LEGACY;
		type_str = "legacy";
	} else if (is_typec) {
		intel_dig_port->tc_type = TC_PORT_TYPEC;
		type_str = "typec";
	} else if (is_tbt) {
		intel_dig_port->tc_type = TC_PORT_TBT;
		type_str = "tbt";
	} else {
		return;
	}

	/* Types are not supposed to be changed at runtime. */
	WARN_ON(old_type != TC_PORT_UNKNOWN &&
		old_type != intel_dig_port->tc_type);

	if (old_type != intel_dig_port->tc_type)
		DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port),
			      type_str);
}

static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
				  struct intel_digital_port *dig_port);

4859 4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912
/*
 * This function implements the first part of the Connect Flow described by our
 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
 * lanes, EDID, etc) is done as needed in the typical places.
 *
 * Unlike the other ports, type-C ports are not available to use as soon as we
 * get a hotplug. The type-C PHYs can be shared between multiple controllers:
 * display, USB, etc. As a result, handshaking through FIA is required around
 * connect and disconnect to cleanly transfer ownership with the controller and
 * set the type-C power state.
 *
 * We could opt to only do the connect flow when we actually try to use the AUX
 * channels or do a modeset, then immediately run the disconnect flow after
 * usage, but there are some implications on this for a dynamic environment:
 * things may go away or change behind our backs. So for now our driver is
 * always trying to acquire ownership of the controller as soon as it gets an
 * interrupt (or polls state and sees a port is connected) and only gives it
 * back when it sees a disconnect. Implementation of a more fine-grained model
 * will require a lot of coordination with user space and thorough testing for
 * the extra possible cases.
 *
 * Returns false when the PHY is not ready or has just disconnected, in which
 * case the port must be treated as disconnected.
 */
static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
			       struct intel_digital_port *dig_port)
{
	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
	u32 val;

	/* TBT and unknown ports need no FIA handshake; nothing to do. */
	if (dig_port->tc_type != TC_PORT_LEGACY &&
	    dig_port->tc_type != TC_PORT_TYPEC)
		return true;

	val = I915_READ(PORT_TX_DFLEXDPPMS);
	if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) {
		DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port);
		return false;
	}

	/*
	 * This function may be called many times in a row without an HPD event
	 * in between, so try to avoid the write when we can.
	 */
	val = I915_READ(PORT_TX_DFLEXDPCSSS);
	if (!(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port))) {
		val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
		I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
	}

	/*
	 * Now we have to re-check the live state, in case the port recently
	 * became disconnected. Not necessary for legacy mode.
	 */
	if (dig_port->tc_type == TC_PORT_TYPEC &&
	    !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) {
		DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port);
		icl_tc_phy_disconnect(dev_priv, dig_port);
		return false;
	}

	return true;
}

/*
 * See the comment at the connect function. This implements the Disconnect
 * Flow: release PHY ownership back to the FIA and forget the port type.
 */
static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
				  struct intel_digital_port *dig_port)
{
	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);

	if (dig_port->tc_type == TC_PORT_UNKNOWN)
		return;

	/*
	 * For TBT there is nothing to do here: its disconnection flow is just
	 * reading the live status, which the caller has already done.
	 */
	if (dig_port->tc_type == TC_PORT_TYPEC ||
	    dig_port->tc_type == TC_PORT_LEGACY) {
		u32 val;

		/* Put the PHY back into the "safe" state. */
		val = I915_READ(PORT_TX_DFLEXDPCSSS);
		val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
		I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
	}

	dig_port->tc_type = TC_PORT_UNKNOWN;
}

/*
 * The type-C ports are different because even when they are connected, they may
 * not be available/usable by the graphics driver: see the comment on
 * icl_tc_phy_connect(). So in our driver instead of adding the additional
 * concept of "usable" and make everything check for "connected and usable" we
 * define a port as "connected" when it is not only connected, but also when it
 * is usable by the rest of the driver. That maintains the old assumption that
 * connected ports are usable, and avoids exposing to the users objects they
 * can't really use.
 */
static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
				  struct intel_digital_port *intel_dig_port)
{
	enum port port = intel_dig_port->base.port;
	enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
	bool is_legacy, is_typec, is_tbt;
	u32 dpsp;

	is_legacy = I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port);

	/*
	 * The spec says we shouldn't be using the ISR bits for detecting
	 * between TC and TBT. We should use DFLEXDPSP.
	 */
	dpsp = I915_READ(PORT_TX_DFLEXDPSP);
	is_typec = dpsp & TC_LIVE_STATE_TC(tc_port);
	is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port);

	/* Nothing live at all: run the disconnect flow and report false. */
	if (!is_legacy && !is_typec && !is_tbt) {
		icl_tc_phy_disconnect(dev_priv, intel_dig_port);
		return false;
	}

	icl_update_tc_port_type(dev_priv, intel_dig_port, is_legacy, is_typec,
				is_tbt);

	/* Connected but not usable counts as disconnected (see above). */
	if (!icl_tc_phy_connect(dev_priv, intel_dig_port))
		return false;

	return true;
}

/* Gen11+: dispatch live-state detection by PHY kind (combo vs type-C). */
static bool icl_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);

	if (intel_port_is_combophy(dev_priv, encoder->port))
		return icl_combo_port_connected(dev_priv, dig_port);
	else if (intel_port_is_tc(dev_priv, encoder->port))
		return icl_tc_port_connected(dev_priv, dig_port);
	else
		MISSING_CASE(encoder->hpd_pin);

	return false;
}

5005 5006
/*
 * intel_digital_port_connected - is the specified port connected?
 * @encoder: intel_encoder
 *
 * In cases where there's a connector physically connected but it can't be used
 * by our hardware we also return false, since the rest of the driver should
 * pretty much treat the port as disconnected. This is relevant for type-C
 * (starting on ICL) where there's ownership involved.
 *
 * Return %true if port is connected, %false otherwise.
 */
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	/* GMCH platforms have their own hotplug registers. */
	if (HAS_GMCH_DISPLAY(dev_priv)) {
		if (IS_GM45(dev_priv))
			return gm45_digital_port_connected(encoder);
		else
			return g4x_digital_port_connected(encoder);
	}

	/* Dispatch to the per-generation implementation. */
	if (INTEL_GEN(dev_priv) >= 11)
		return icl_digital_port_connected(encoder);
	else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv))
		return spt_digital_port_connected(encoder);
	else if (IS_GEN9_LP(dev_priv))
		return bxt_digital_port_connected(encoder);
	else if (IS_GEN8(dev_priv))
		return bdw_digital_port_connected(encoder);
	else if (IS_GEN7(dev_priv))
		return ivb_digital_port_connected(encoder);
	else if (IS_GEN6(dev_priv))
		return snb_digital_port_connected(encoder);
	else if (IS_GEN5(dev_priv))
		return ilk_digital_port_connected(encoder);

	MISSING_CASE(INTEL_GEN(dev_priv));
	return false;
}

5046
static struct edid *
5047
intel_dp_get_edid(struct intel_dp *intel_dp)
5048
{
5049
	struct intel_connector *intel_connector = intel_dp->attached_connector;
5050

5051 5052 5053 5054
	/* use cached edid if we have one */
	if (intel_connector->edid) {
		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
5055 5056
			return NULL;

J
Jani Nikula 已提交
5057
		return drm_edid_duplicate(intel_connector->edid);
5058 5059 5060 5061
	} else
		return drm_get_edid(&intel_connector->base,
				    &intel_dp->aux.ddc);
}
5062

5063 5064 5065 5066 5067
/*
 * Refresh the connector's cached detect-time EDID: drop any previous one,
 * read the current EDID, and derive audio capability / CEC state from it.
 */
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct edid *edid;

	intel_dp_unset_edid(intel_dp);
	edid = intel_dp_get_edid(intel_dp);
	intel_connector->detect_edid = edid;

	intel_dp->has_audio = drm_detect_monitor_audio(edid);
	drm_dp_cec_set_edid(&intel_dp->aux, edid);
}

5077 5078
/*
 * Drop the cached detect-time EDID and the state derived from it
 * (CEC EDID registration and the audio-capability flag).
 */
static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	drm_dp_cec_unset_edid(&intel_dp->aux);
	kfree(intel_connector->detect_edid);
	intel_connector->detect_edid = NULL;

	intel_dp->has_audio = false;
}
5088

5089
/*
 * Connector ->detect_ctx hook: determine whether a sink is present and, when
 * it is, refresh link parameters, sink capabilities, MST state and the cached
 * EDID. Holds an AUX power reference for the duration of the probe.
 *
 * @force is part of the hook signature and is not used here.
 * Returns a drm_connector_status value, or a negative error code if the
 * link-retrain step fails (e.g. a modeset-lock contention from @ctx).
 */
static int
intel_dp_detect(struct drm_connector *connector,
		struct drm_modeset_acquire_ctx *ctx,
		bool force)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum drm_connector_status status;
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(dig_port);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	intel_display_power_get(dev_priv, aux_domain);

	/* Can't disconnect eDP */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(encoder))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		/* Forget stale compliance-test and DSC capability state. */
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
		memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

		if (intel_dp->is_mst) {
			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
				      intel_dp->is_mst,
				      intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	if (intel_dp->reset_link_params) {
		/* Initial max link lane count */
		intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);

		/* Initial max link rate */
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);

		intel_dp->reset_link_params = false;
	}

	intel_dp_print_rates(intel_dp);

	/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
	if (INTEL_GEN(dev_priv) >= 11)
		intel_dp_get_dsc_sink_cap(intel_dp);

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	intel_dp_configure_mst(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Some external monitors do not signal loss of link synchronization
	 * with an IRQ_HPD, so force a link status check.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		int ret;

		ret = intel_dp_retrain_link(encoder, ctx);
		if (ret) {
			/* Drop the AUX reference before the error return. */
			intel_display_power_put(dev_priv, aux_domain);
			return ret;
		}
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	if (intel_dp_is_edp(intel_dp) ||
	    to_intel_connector(connector)->detect_edid)
		status = connector_status_connected;

	intel_dp_check_service_irq(intel_dp);

out:
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	intel_display_power_put(dev_priv, aux_domain);
	return status;
}

5200 5201
/*
 * Connector ->force hook: re-read the EDID for a connector whose status
 * userspace has forced, without performing a full detect cycle.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(dig_port);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	/* EDID read goes over AUX; hold the AUX power domain meanwhile. */
	intel_display_power_get(dev_priv, aux_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, aux_domain);
}

static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct edid *edid;

	edid = intel_connector->detect_edid;
	if (edid) {
		int ret = intel_connector_update_modes(connector, edid);
		if (ret)
			return ret;
	}
5235

5236
	/* if eDP has no EDID, fall back to fixed mode */
5237
	if (intel_dp_is_edp(intel_attached_dp(connector)) &&
5238
	    intel_connector->panel.fixed_mode) {
5239
		struct drm_display_mode *mode;
5240 5241

		mode = drm_mode_duplicate(connector->dev,
5242
					  intel_connector->panel.fixed_mode);
5243
		if (mode) {
5244 5245 5246 5247
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}
5248

5249
	return 0;
5250 5251
}

5252 5253 5254 5255
/*
 * Connector ->late_register hook: register the base connector, its debugfs
 * entry, the DP AUX channel device and (on AUX success) the CEC adapter.
 * Returns 0 on success or the error from the failing registration step.
 */
static int
intel_dp_connector_register(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_device *dev = connector->dev;
	int ret;

	ret = intel_connector_register(connector);
	if (ret)
		return ret;

	i915_debugfs_connector_add(connector);

	DRM_DEBUG_KMS("registering %s bus for %s\n",
		      intel_dp->aux.name, connector->kdev->kobj.name);

	/* Parent the AUX device under the connector's sysfs device. */
	intel_dp->aux.dev = connector->kdev;
	ret = drm_dp_aux_register(&intel_dp->aux);
	if (!ret)
		drm_dp_cec_register_connector(&intel_dp->aux,
					      connector->name, dev->dev);
	return ret;
}

5276 5277 5278
/*
 * Connector ->early_unregister hook: tear down CEC and AUX registration
 * before the base connector is unregistered (reverse of registration order).
 */
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);

	drm_dp_cec_unregister_connector(&intel_dp->aux);
	drm_dp_aux_unregister(&intel_dp->aux);
	intel_connector_unregister(connector);
}

P
Paulo Zanoni 已提交
5286
/*
 * Encoder ->destroy hook: tear down MST state, quiesce eDP panel VDD and
 * its reboot notifier, release AUX resources, and free the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (intel_dp_is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}

	intel_dp_aux_fini(intel_dp);

	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}

5314
/* Suspend hook: make sure eDP panel VDD is really off before suspending. */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!intel_dp_is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}

5331 5332 5333 5334 5335
/*
 * Write An to the HDCP sink over AUX, then trigger the hardware to transmit
 * Aksv (which software cannot read) using a pre-built AUX header.
 *
 * Uses the kernel u8 type for byte buffers, consistent with the other HDCP
 * helpers in this file (previously uint8_t).
 *
 * Returns 0 on success or a negative error code on AUX failure.
 */
static
int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
				u8 *an)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
	static const struct drm_dp_aux_msg msg = {
		.request = DP_AUX_NATIVE_WRITE,
		.address = DP_AUX_HDCP_AKSV,
		.size = DRM_HDCP_KSV_LEN,
	};
	u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
	ssize_t dpcd_ret;
	int ret;

	/* Output An first, that's easy */
	dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
				     an, DRM_HDCP_AN_LEN);
	if (dpcd_ret != DRM_HDCP_AN_LEN) {
		DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
			      dpcd_ret);
		return dpcd_ret >= 0 ? -EIO : dpcd_ret;
	}

	/*
	 * Since Aksv is Oh-So-Secret, we can't access it in software. So in
	 * order to get it on the wire, we need to create the AUX header as if
	 * we were writing the data, and then tickle the hardware to output the
	 * data once the header is sent out.
	 */
	intel_dp_aux_header(txbuf, &msg);

	ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size,
				rxbuf, sizeof(rxbuf),
				DP_AUX_CH_CTL_AUX_AKSV_SELECT);
	if (ret < 0) {
		DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
		return ret;
	} else if (ret == 0) {
		DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
		return -EIO;
	}

	/* The sink's native AUX reply code lives in the high nibble. */
	reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
	return reply == DP_AUX_NATIVE_REPLY_ACK ? 0 : -EIO;
}

/* Read the sink's Bksv over AUX. Returns 0 or a negative error code. */
static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
				   u8 *bksv)
{
	ssize_t ret;
	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
			       DRM_HDCP_KSV_LEN);
	if (ret != DRM_HDCP_KSV_LEN) {
		DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
		return ret >= 0 ? -EIO : ret;
	}
	return 0;
}

/* Read the repeater topology info (BINFO) the shim exposes as "bstatus". */
static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
				      u8 *bstatus)
{
	ssize_t ret;
	/*
	 * For some reason the HDMI and DP HDCP specs call this register
	 * definition by different names. In the HDMI spec, it's called BSTATUS,
	 * but in DP it's called BINFO.
	 */
	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
			       bstatus, DRM_HDCP_BSTATUS_LEN);
	if (ret != DRM_HDCP_BSTATUS_LEN) {
		DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
		return ret >= 0 ? -EIO : ret;
	}
	return 0;
}

/* Read the single BCAPS byte over AUX. Returns 0 or a negative error code. */
static
int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
			     u8 *bcaps)
{
	ssize_t ret;

	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
			       bcaps, 1);
	if (ret != 1) {
		DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
		return ret >= 0 ? -EIO : ret;
	}

	return 0;
}

/*
 * Report via *repeater_present whether the HDCP sink advertises itself as
 * a repeater in its BCAPS register. Returns 0 or a negative error code.
 */
static
int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
				   bool *repeater_present)
{
	u8 bcaps;
	int err = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);

	if (err)
		return err;

	*repeater_present = !!(bcaps & DP_BCAPS_REPEATER_PRESENT);
	return 0;
}

/* Read the sink's Ri' value over AUX. Returns 0 or a negative error code. */
static
int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
				u8 *ri_prime)
{
	ssize_t ret;
	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
			       ri_prime, DRM_HDCP_RI_LEN);
	if (ret != DRM_HDCP_RI_LEN) {
		DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
		return ret >= 0 ? -EIO : ret;
	}
	return 0;
}

/* Report via *ksv_ready whether BSTATUS signals the KSV FIFO is ready. */
static
int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
				 bool *ksv_ready)
{
	ssize_t ret;
	u8 bstatus;
	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
			       &bstatus, 1);
	if (ret != 1) {
		DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
		return ret >= 0 ? -EIO : ret;
	}
	*ksv_ready = bstatus & DP_BSTATUS_READY;
	return 0;
}

/*
 * Drain the repeater's KSV FIFO into @ksv_fifo, which must hold
 * num_downstream * DRM_HDCP_KSV_LEN bytes. Returns 0 or a negative error.
 */
static
int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
				int num_downstream, u8 *ksv_fifo)
{
	ssize_t ret;
	int i;

	/* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
	for (i = 0; i < num_downstream; i += 3) {
		size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
		ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
				       DP_AUX_HDCP_KSV_FIFO,
				       ksv_fifo + i * DRM_HDCP_KSV_LEN,
				       len);
		if (ret != len) {
			DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
				      i, ret);
			return ret >= 0 ? -EIO : ret;
		}
	}
	return 0;
}

/*
 * Read one 32-bit part of V' (index 0..DRM_HDCP_V_PRIME_NUM_PARTS-1).
 * Returns 0, -EINVAL for an out-of-range index, or a negative AUX error.
 */
static
int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
				    int i, u32 *part)
{
	ssize_t ret;

	if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
		return -EINVAL;

	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
			       DP_AUX_HDCP_V_PRIME(i), part,
			       DRM_HDCP_V_PRIME_PART_LEN);
	if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
		DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
		return ret >= 0 ? -EIO : ret;
	}
	return 0;
}

/* No-op shim hook: HDCP signalling toggling is not needed for SST DP. */
static
int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
				    bool enable)
{
	/* Not used for single stream DisplayPort setups */
	return 0;
}

static
bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
{
	ssize_t ret;
	u8 bstatus;
5524

5525 5526 5527
	ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
			       &bstatus, 1);
	if (ret != 1) {
5528
		DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5529
		return false;
5530
	}
5531

5532 5533 5534
	return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
}

5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549
/*
 * Report via *hdcp_capable whether the sink advertises HDCP support in
 * its BCAPS register. Returns 0 or a negative error code.
 */
static
int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
			  bool *hdcp_capable)
{
	u8 bcaps;
	int err = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);

	if (err)
		return err;

	*hdcp_capable = !!(bcaps & DP_BCAPS_HDCP_CAPABLE);
	return 0;
}

5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560
/* HDCP shim vtable wiring the DP/AUX helpers above into the HDCP core. */
static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
	.write_an_aksv = intel_dp_hdcp_write_an_aksv,
	.read_bksv = intel_dp_hdcp_read_bksv,
	.read_bstatus = intel_dp_hdcp_read_bstatus,
	.repeater_present = intel_dp_hdcp_repeater_present,
	.read_ri_prime = intel_dp_hdcp_read_ri_prime,
	.read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
	.read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
	.read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
	.toggle_signalling = intel_dp_hdcp_toggle_signalling,
	.check_link = intel_dp_hdcp_check_link,
	.hdcp_capable = intel_dp_hdcp_capable,
};

5564 5565
/*
 * Adopt a panel-VDD state left enabled by the BIOS: take the matching power
 * reference and schedule a deferred VDD off. Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));

	edp_panel_vdd_schedule_off(intel_dp);
}

5586 5587
static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
{
5588
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
5589 5590
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum pipe pipe;
5591

5592 5593 5594
	if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
				  encoder->port, &pipe))
		return pipe;
5595

5596
	return INVALID_PIPE;
5597 5598
}

5599
/*
 * Encoder ->reset hook (boot/resume): resync software state with the
 * hardware — cached DP register, LSPCON, link params, VLV/CHV active pipe,
 * and the eDP power sequencer that the BIOS may have reprogrammed.
 */
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

	if (!HAS_DDI(dev_priv))
		intel_dp->DP = I915_READ(intel_dp->output_reg);

	if (lspcon->active)
		lspcon_resume(lspcon);

	/* Force re-derivation of max link rate/lane count on next detect. */
	intel_dp->reset_link_params = true;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->active_pipe = vlv_active_pipe(intel_dp);

	if (intel_dp_is_edp(intel_dp)) {
		/* Reinit the power sequencer, in case BIOS did something with it. */
		intel_dp_pps_init(intel_dp);
		intel_edp_panel_vdd_sanitize(intel_dp);
	}

	pps_unlock(intel_dp);
}

5627
/* DRM connector vtable for DP connectors (atomic property helpers + DP hooks). */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};

/* Connector helper vtable: detection, mode enumeration and atomic checks. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_digital_connector_atomic_check,
};

/* Encoder vtable: reset on boot/resume, teardown on destroy. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};

5651
/*
 * Handle a DP hotplug pulse. Long pulses just flag a link-parameter reset
 * (full detection happens later); short pulses are serviced here, either
 * through the MST manager or the SST short-pulse handler, with the AUX
 * power domain held across the DPCD accesses.
 *
 * Returns IRQ_HANDLED when the pulse was fully serviced, IRQ_NONE when
 * further processing (a full detect cycle) is needed.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum irqreturn ret = IRQ_NONE;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->base.port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->base.port),
		      long_hpd ? "long" : "short");

	if (long_hpd) {
		intel_dp->reset_link_params = true;
		return IRQ_NONE;
	}

	intel_display_power_get(dev_priv,
				intel_aux_power_domain(intel_dig_port));

	if (intel_dp->is_mst) {
		if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
			/*
			 * If we were in MST mode, and device is not
			 * there, get out of MST mode
			 */
			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
				      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
			goto put_power;
		}
	}

	if (!intel_dp->is_mst) {
		bool handled;

		handled = intel_dp_short_pulse(intel_dp);

		if (!handled)
			goto put_power;
	}

	ret = IRQ_HANDLED;

put_power:
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(intel_dig_port));

	return ret;
}

5715
/* check the VBT to see whether the eDP is on another port */
5716
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
5717
{
5718 5719 5720 5721
	/*
	 * eDP not supported on g4x. so bail out early just
	 * for a bit extra safety in case the VBT is bonkers.
	 */
5722
	if (INTEL_GEN(dev_priv) < 5)
5723 5724
		return false;

5725
	if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
5726 5727
		return true;

5728
	return intel_bios_is_port_edp(dev_priv, port);
5729 5730
}

5731
/*
 * Attach the DRM properties a DP connector supports: force-audio (except
 * G4X and port A), broadcast RGB, max-bpc (range varies by platform), and
 * for eDP the allowed panel scaling modes (default: aspect scaling).
 */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	if (!IS_G4X(dev_priv) && port != PORT_A)
		intel_attach_force_audio_property(connector);

	intel_attach_broadcast_rgb_property(connector);
	if (HAS_GMCH_DISPLAY(dev_priv))
		drm_connector_attach_max_bpc_property(connector, 6, 10);
	else if (INTEL_GEN(dev_priv) >= 5)
		drm_connector_attach_max_bpc_property(connector, 6, 12);

	if (intel_dp_is_edp(intel_dp)) {
		u32 allowed_scalers;

		allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
		if (!HAS_GMCH_DISPLAY(dev_priv))
			allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);

		drm_connector_attach_scaling_mode_property(connector, allowed_scalers);

		connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
	}
}

5760 5761
/* Seed the panel power-sequencing timestamps used to enforce PPS delays. */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->panel_power_off_time = ktime_get_boottime();
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}

5767
/*
 * Read the panel power sequencer delays (T1/T3, T8, T9, T10, T11/T12) out
 * of the PPS registers into @seq. On BXT/GLK and CNP/ICP PCH platforms the
 * power-cycle delay lives in the control register instead of PP_DIV.
 */
static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp_ctl = ironlake_get_pp_control(intel_dp);

	pp_on = I915_READ(regs.pp_on);
	pp_off = I915_READ(regs.pp_off);
	if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) &&
	    !HAS_PCH_ICP(dev_priv)) {
		I915_WRITE(regs.pp_ctrl, pp_ctl);
		pp_div = I915_READ(regs.pp_div);
	}

	/* Pull timing values out of registers */
	seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		     PANEL_POWER_UP_DELAY_SHIFT;

	seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		  PANEL_LIGHT_ON_DELAY_SHIFT;

	seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		  PANEL_LIGHT_OFF_DELAY_SHIFT;

	seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		   PANEL_POWER_DOWN_DELAY_SHIFT;

	if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
	    HAS_PCH_ICP(dev_priv)) {
		seq->t11_t12 = ((pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
				BXT_POWER_CYCLE_DELAY_SHIFT) * 1000;
	} else {
		seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
	}
}

I
Imre Deak 已提交
5811 5812 5813 5814 5815 5816 5817 5818 5819
static void
intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
{
	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      state_name,
		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}

static void
5820
intel_pps_verify_state(struct intel_dp *intel_dp)
I
Imre Deak 已提交
5821 5822 5823 5824
{
	struct edp_power_seq hw;
	struct edp_power_seq *sw = &intel_dp->pps_delays;

5825
	intel_pps_readout_hw_state(intel_dp, &hw);
I
Imre Deak 已提交
5826 5827 5828 5829 5830 5831 5832 5833 5834

	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
		DRM_ERROR("PPS state mismatch\n");
		intel_pps_dump_state("sw", sw);
		intel_pps_dump_state("hw", &hw);
	}
}

5835
static void
5836
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
5837
{
5838
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
5839 5840 5841 5842 5843 5844 5845 5846 5847
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

5848
	intel_pps_readout_hw_state(intel_dp, &cur);
5849

I
Imre Deak 已提交
5850
	intel_pps_dump_state("cur", &cur);
5851

5852
	vbt = dev_priv->vbt.edp.pps;
5853 5854 5855 5856 5857 5858
	/* On Toshiba Satellite P50-C-18C system the VBT T12 delay
	 * of 500ms appears to be too short. Ocassionally the panel
	 * just fails to power back on. Increasing the delay to 800ms
	 * seems sufficient to avoid this problem.
	 */
	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
5859
		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
5860 5861 5862
		DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
			      vbt.t11_t12);
	}
5863 5864 5865 5866 5867
	/* T11_T12 delay is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	vbt.t11_t12 += 100 * 10;
5868 5869 5870 5871 5872 5873 5874 5875 5876 5877 5878 5879 5880

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

I
Imre Deak 已提交
5881
	intel_pps_dump_state("vbt", &vbt);
5882 5883 5884

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
5885
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
5886 5887 5888 5889 5890 5891 5892 5893 5894
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

5895
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
5896 5897 5898 5899 5900 5901 5902
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

5903 5904 5905 5906 5907 5908
	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
I
Imre Deak 已提交
5909 5910 5911 5912 5913 5914 5915 5916 5917 5918

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;
5919 5920 5921 5922 5923 5924

	/*
	 * HW has only a 100msec granularity for t11_t12 so round it up
	 * accordingly.
	 */
	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
5925 5926 5927
}

static void
5928
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
5929
					      bool force_disable_vdd)
5930
{
5931
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
5932
	u32 pp_on, pp_off, pp_div, port_sel = 0;
5933
	int div = dev_priv->rawclk_freq / 1000;
5934
	struct pps_registers regs;
5935
	enum port port = dp_to_dig_port(intel_dp)->base.port;
5936
	const struct edp_power_seq *seq = &intel_dp->pps_delays;
5937

V
Ville Syrjälä 已提交
5938
	lockdep_assert_held(&dev_priv->pps_mutex);
5939

5940
	intel_pps_get_registers(intel_dp, &regs);
5941

5942 5943
	/*
	 * On some VLV machines the BIOS can leave the VDD
5944
	 * enabled even on power sequencers which aren't
5945 5946 5947 5948 5949 5950 5951
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * edp_panel_vdd_on() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
5952
	 * soon as the new power sequencer gets initialized.
5953 5954 5955 5956 5957 5958 5959 5960 5961 5962 5963 5964 5965 5966
	 */
	if (force_disable_vdd) {
		u32 pp = ironlake_get_pp_control(intel_dp);

		WARN(pp & PANEL_POWER_ON, "Panel power already on\n");

		if (pp & EDP_FORCE_VDD)
			DRM_DEBUG_KMS("VDD already on, disabling first\n");

		pp &= ~EDP_FORCE_VDD;

		I915_WRITE(regs.pp_ctrl, pp);
	}

5967
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
I
Imre Deak 已提交
5968 5969
		(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5970
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5971 5972
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
5973 5974
	if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
	    HAS_PCH_ICP(dev_priv)) {
5975
		pp_div = I915_READ(regs.pp_ctrl);
5976
		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5977
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5978 5979 5980 5981 5982 5983
				<< BXT_POWER_CYCLE_DELAY_SHIFT);
	} else {
		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
	}
5984 5985 5986

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
5987
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
5988
		port_sel = PANEL_PORT_SELECT_VLV(port);
5989
	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
5990 5991
		switch (port) {
		case PORT_A:
5992
			port_sel = PANEL_PORT_SELECT_DPA;
5993 5994 5995 5996 5997
			break;
		case PORT_C:
			port_sel = PANEL_PORT_SELECT_DPC;
			break;
		case PORT_D:
5998
			port_sel = PANEL_PORT_SELECT_DPD;
5999 6000 6001 6002 6003
			break;
		default:
			MISSING_CASE(port);
			break;
		}
6004 6005
	}

6006 6007
	pp_on |= port_sel;

6008 6009
	I915_WRITE(regs.pp_on, pp_on);
	I915_WRITE(regs.pp_off, pp_off);
6010 6011
	if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
	    HAS_PCH_ICP(dev_priv))
6012
		I915_WRITE(regs.pp_ctrl, pp_div);
6013
	else
6014
		I915_WRITE(regs.pp_div, pp_div);
6015 6016

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
6017 6018
		      I915_READ(regs.pp_on),
		      I915_READ(regs.pp_off),
6019 6020
		      (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)  ||
		       HAS_PCH_ICP(dev_priv)) ?
6021 6022
		      (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
		      I915_READ(regs.pp_div));
6023 6024
}

6025
static void intel_dp_pps_init(struct intel_dp *intel_dp)
6026
{
6027
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6028 6029

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6030 6031
		vlv_initial_power_sequencer_setup(intel_dp);
	} else {
6032 6033
		intel_dp_init_panel_power_sequencer(intel_dp);
		intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
6034 6035 6036
	}
}

6037 6038
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
6039
 * @dev_priv: i915 device
6040
 * @crtc_state: a pointer to the active intel_crtc_state
6041 6042 6043 6044 6045 6046 6047 6048 6049
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
6050
static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
6051
				    const struct intel_crtc_state *crtc_state,
6052
				    int refresh_rate)
6053 6054
{
	struct intel_encoder *encoder;
6055 6056
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
6057
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
6058
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
6059 6060 6061 6062 6063 6064

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

6065 6066
	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
6067 6068 6069
		return;
	}

6070 6071
	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;
6072 6073 6074 6075 6076 6077

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

6078
	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
6079 6080 6081 6082
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

6083 6084
	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
6085 6086
		index = DRRS_LOW_RR;

6087
	if (index == dev_priv->drrs.refresh_rate_type) {
6088 6089 6090 6091 6092
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

6093
	if (!crtc_state->base.active) {
6094 6095 6096 6097
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

6098
	if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
6099 6100
		switch (index) {
		case DRRS_HIGH_RR:
6101
			intel_dp_set_m_n(crtc_state, M1_N1);
6102 6103
			break;
		case DRRS_LOW_RR:
6104
			intel_dp_set_m_n(crtc_state, M2_N2);
6105 6106 6107 6108 6109
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refreshrate type\n");
		}
6110 6111
	} else if (INTEL_GEN(dev_priv) > 6) {
		i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
6112
		u32 val;
6113

6114
		val = I915_READ(reg);
6115
		if (index > DRRS_HIGH_RR) {
6116
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6117 6118 6119
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
6120
		} else {
6121
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6122 6123 6124
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
6125 6126 6127 6128
		}
		I915_WRITE(reg, val);
	}

6129 6130 6131 6132 6133
	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}

6134 6135 6136
/**
 * intel_edp_drrs_enable - init drrs struct if supported
 * @intel_dp: DP struct
6137
 * @crtc_state: A pointer to the active crtc state.
6138 6139 6140
 *
 * Initializes frontbuffer_bits and drrs.dp
 */
6141
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
6142
			   const struct intel_crtc_state *crtc_state)
V
Vandana Kannan 已提交
6143
{
6144
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
V
Vandana Kannan 已提交
6145

6146
	if (!crtc_state->has_drrs) {
V
Vandana Kannan 已提交
6147 6148 6149 6150
		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
		return;
	}

6151 6152 6153 6154 6155
	if (dev_priv->psr.enabled) {
		DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
		return;
	}

V
Vandana Kannan 已提交
6156 6157 6158 6159 6160 6161 6162 6163 6164 6165 6166 6167 6168 6169
	mutex_lock(&dev_priv->drrs.mutex);
	if (WARN_ON(dev_priv->drrs.dp)) {
		DRM_ERROR("DRRS already enabled\n");
		goto unlock;
	}

	dev_priv->drrs.busy_frontbuffer_bits = 0;

	dev_priv->drrs.dp = intel_dp;

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}

6170 6171 6172
/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
6173
 * @old_crtc_state: Pointer to old crtc_state.
6174 6175
 *
 */
6176
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
6177
			    const struct intel_crtc_state *old_crtc_state)
V
Vandana Kannan 已提交
6178
{
6179
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
V
Vandana Kannan 已提交
6180

6181
	if (!old_crtc_state->has_drrs)
V
Vandana Kannan 已提交
6182 6183 6184 6185 6186 6187 6188 6189 6190
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6191 6192
		intel_dp_set_drrs_state(dev_priv, old_crtc_state,
			intel_dp->attached_connector->panel.fixed_mode->vrefresh);
V
Vandana Kannan 已提交
6193 6194 6195 6196 6197 6198 6199

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	cancel_delayed_work_sync(&dev_priv->drrs.work);
}

6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210 6211 6212
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	if (!intel_dp)
		goto unlock;

6213
	/*
6214 6215
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
6216 6217
	 */

6218 6219
	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;
6220

6221 6222 6223 6224 6225 6226
	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;

		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
			intel_dp->attached_connector->panel.downclock_mode->vrefresh);
	}
6227

6228 6229
unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
6230 6231
}

6232
/**
6233
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
6234
 * @dev_priv: i915 device
6235 6236
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
6237 6238
 * This function gets called everytime rendering on the given planes start.
 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
6239 6240 6241
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
6242 6243
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
			       unsigned int frontbuffer_bits)
6244 6245 6246 6247
{
	struct drm_crtc *crtc;
	enum pipe pipe;

6248
	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
6249 6250
		return;

6251
	cancel_delayed_work(&dev_priv->drrs.work);
6252

6253
	mutex_lock(&dev_priv->drrs.mutex);
6254 6255 6256 6257 6258
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

6259 6260 6261
	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

6262 6263 6264
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

6265
	/* invalidate means busy screen hence upclock */
6266
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6267 6268
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
			dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
6269 6270 6271 6272

	mutex_unlock(&dev_priv->drrs.mutex);
}

6273
/**
6274
 * intel_edp_drrs_flush - Restart Idleness DRRS
6275
 * @dev_priv: i915 device
6276 6277
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
6278 6279 6280 6281
 * This function gets called every time rendering on the given planes has
 * completed or flip on a crtc is completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
 * if no other planes are dirty.
6282 6283 6284
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
6285 6286
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits)
6287 6288 6289 6290
{
	struct drm_crtc *crtc;
	enum pipe pipe;

6291
	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
6292 6293
		return;

6294
	cancel_delayed_work(&dev_priv->drrs.work);
6295

6296
	mutex_lock(&dev_priv->drrs.mutex);
6297 6298 6299 6300 6301
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

6302 6303
	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;
6304 6305

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
6306 6307
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

6308
	/* flush means busy screen hence upclock */
6309
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6310 6311
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
				dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
6312 6313 6314 6315 6316 6317

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
6318 6319 6320 6321 6322
		schedule_delayed_work(&dev_priv->drrs.work,
				msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * DOC: Display Refresh Rate Switching (DRRS)
 *
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
 * dynamically, based on the usage scenario. This feature is applicable
 * for internal panels.
 *
 * Indication that the panel supports DRRS is given by the panel EDID, which
 * would list multiple refresh rates for one resolution.
 *
 * DRRS is of 2 types - static and seamless.
 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
 * (may appear as a blink on screen) and is used in dock-undock scenario.
 * Seamless DRRS involves changing RR without any visual effect to the user
 * and can be used during normal system usage. This is done by programming
 * certain registers.
 *
 * Support for static/seamless DRRS may be indicated in the VBT based on
 * inputs from the panel spec.
 *
 * DRRS saves power by switching to low RR based on usage scenarios.
 *
 * The implementation is based on frontbuffer tracking implementation.  When
 * there is a disturbance on the screen triggered by user activity or a periodic
 * system activity, DRRS is disabled (RR is changed to high RR).  When there is
 * no movement on screen, after a timeout of 1 second, a switch to low RR is
 * made.
 *
 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
 * and intel_edp_drrs_flush() are called.
 *
 * DRRS can be further extended to support other internal panels and also
 * the scenario of video playback wherein RR is set based on the rate
 * requested by userspace.
 */

/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
6362
 * @connector: eDP connector
6363 6364 6365 6366 6367 6368 6369 6370 6371 6372
 * @fixed_mode: preferred mode of panel
 *
 * This function is  called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
6373
static struct drm_display_mode *
6374 6375
intel_dp_drrs_init(struct intel_connector *connector,
		   struct drm_display_mode *fixed_mode)
6376
{
6377
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
6378 6379
	struct drm_display_mode *downclock_mode = NULL;

6380 6381 6382
	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

6383
	if (INTEL_GEN(dev_priv) <= 6) {
6384 6385 6386 6387 6388
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
6389
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
6390 6391 6392
		return NULL;
	}

6393 6394
	downclock_mode = intel_find_panel_downclock(dev_priv, fixed_mode,
						    &connector->base);
6395 6396

	if (!downclock_mode) {
6397
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
6398 6399 6400
		return NULL;
	}

6401
	dev_priv->drrs.type = dev_priv->vbt.drrs_type;
6402

6403
	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
6404
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
6405 6406 6407
	return downclock_mode;
}

6408
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
6409
				     struct intel_connector *intel_connector)
6410
{
6411 6412
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_device *dev = &dev_priv->drm;
6413
	struct drm_connector *connector = &intel_connector->base;
6414
	struct drm_display_mode *fixed_mode = NULL;
6415
	struct drm_display_mode *downclock_mode = NULL;
6416 6417 6418
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
6419
	enum pipe pipe = INVALID_PIPE;
6420

6421
	if (!intel_dp_is_edp(intel_dp))
6422 6423
		return true;

6424 6425
	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);

6426 6427 6428 6429 6430 6431
	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
6432
	if (intel_get_lvds_encoder(&dev_priv->drm)) {
6433 6434 6435 6436 6437 6438
		WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		DRM_INFO("LVDS was detected, not registering eDP\n");

		return false;
	}

6439
	pps_lock(intel_dp);
6440 6441

	intel_dp_init_panel_power_timestamps(intel_dp);
6442
	intel_dp_pps_init(intel_dp);
6443
	intel_edp_panel_vdd_sanitize(intel_dp);
6444

6445
	pps_unlock(intel_dp);
6446

6447
	/* Cache DPCD and EDID for edp. */
6448
	has_dpcd = intel_edp_init_dpcd(intel_dp);
6449

6450
	if (!has_dpcd) {
6451 6452
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
6453
		goto out_vdd_off;
6454 6455
	}

6456
	mutex_lock(&dev->mode_config.mutex);
6457
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
6458 6459
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
6460
			drm_connector_update_edid_property(connector,
6461 6462 6463 6464 6465 6466 6467 6468 6469 6470
								edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

6471
	/* prefer fixed mode from EDID if available */
6472 6473 6474
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
6475 6476
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
6477
			break;
6478 6479 6480 6481 6482 6483 6484
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
6485
		if (fixed_mode) {
6486
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
6487 6488 6489
			connector->display_info.width_mm = fixed_mode->width_mm;
			connector->display_info.height_mm = fixed_mode->height_mm;
		}
6490
	}
6491
	mutex_unlock(&dev->mode_config.mutex);
6492

6493
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6494 6495
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);
6496 6497 6498 6499 6500 6501

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
6502
		pipe = vlv_active_pipe(intel_dp);
6503 6504 6505 6506 6507 6508 6509 6510 6511

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
6512 6513
	}

6514
	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
6515
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
6516
	intel_panel_setup_backlight(connector, pipe);
6517

6518 6519 6520 6521
	if (fixed_mode)
		drm_connector_init_panel_orientation_property(
			connector, fixed_mode->hdisplay, fixed_mode->vdisplay);

6522
	return true;
6523 6524 6525 6526 6527 6528 6529 6530 6531 6532 6533 6534

out_vdd_off:
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	/*
	 * vdd might still be enabled do to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);

	return false;
6535 6536
}

6537 6538 6539 6540 6541 6542 6543 6544 6545 6546 6547 6548 6549 6550 6551 6552
static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
	struct intel_connector *intel_connector;
	struct drm_connector *connector;

	intel_connector = container_of(work, typeof(*intel_connector),
				       modeset_retry_work);
	connector = &intel_connector->base;
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
		      connector->name);

	/* Grab the locks before changing connector property*/
	mutex_lock(&connector->dev->mode_config.mutex);
	/* Set connector link status to BAD and send a Uevent to notify
	 * userspace to do a modeset.
	 */
6553 6554
	drm_connector_set_link_status_property(connector,
					       DRM_MODE_LINK_STATUS_BAD);
6555 6556 6557 6558 6559
	mutex_unlock(&connector->dev->mode_config.mutex);
	/* Send Hotplug uevent so userspace can reprobe */
	drm_kms_helper_hotplug_event(connector->dev);
}

6560
bool
6561 6562
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
6563
{
6564 6565 6566 6567
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
6568
	struct drm_i915_private *dev_priv = to_i915(dev);
6569
	enum port port = intel_encoder->port;
6570
	int type;
6571

6572 6573 6574 6575
	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

6576 6577 6578 6579 6580
	if (WARN(intel_dig_port->max_lanes < 1,
		 "Not enough lanes (%d) for DP on port %c\n",
		 intel_dig_port->max_lanes, port_name(port)))
		return false;

6581 6582
	intel_dp_set_source_rates(intel_dp);

6583
	intel_dp->reset_link_params = true;
6584
	intel_dp->pps_pipe = INVALID_PIPE;
6585
	intel_dp->active_pipe = INVALID_PIPE;
6586

6587
	/* intel_dp vfuncs */
6588
	if (HAS_DDI(dev_priv))
6589 6590
		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;

6591 6592
	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
6593
	intel_dp->attached_connector = intel_connector;
6594

6595
	if (intel_dp_is_port_edp(dev_priv, port))
6596
		type = DRM_MODE_CONNECTOR_eDP;
6597 6598
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;
6599

6600 6601 6602
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->active_pipe = vlv_active_pipe(intel_dp);

6603 6604 6605 6606 6607 6608 6609 6610
	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

6611
	/* eDP only on port B and/or C on vlv/chv */
6612
	if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
6613 6614
		    intel_dp_is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
6615 6616
		return false;

6617 6618 6619 6620
	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

6621
	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
6622 6623
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

6624
	if (!HAS_GMCH_DISPLAY(dev_priv))
6625
		connector->interlace_allowed = true;
6626 6627
	connector->doublescan_allowed = 0;

6628
	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
6629

6630
	intel_dp_aux_init(intel_dp);
6631

6632
	intel_connector_attach_encoder(intel_connector, intel_encoder);
6633

6634
	if (HAS_DDI(dev_priv))
6635 6636 6637 6638
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

6639
	/* init MST on ports that can support it */
6640
	if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
6641 6642
	    (port == PORT_B || port == PORT_C ||
	     port == PORT_D || port == PORT_F))
6643 6644
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);
6645

6646
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
6647 6648 6649
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(intel_dig_port);
		goto fail;
6650
	}
6651

6652
	intel_dp_add_properties(intel_dp, connector);
6653

6654
	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
6655 6656 6657 6658
		int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
		if (ret)
			DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
	}
6659

6660 6661 6662 6663
	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
6664
	if (IS_G45(dev_priv)) {
6665 6666 6667
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}
6668 6669

	return true;
6670 6671 6672 6673 6674

fail:
	drm_connector_cleanup(connector);

	return false;
6675
}
6676

6677
bool intel_dp_init(struct drm_i915_private *dev_priv,
6678 6679
		   i915_reg_t output_reg,
		   enum port port)
6680 6681 6682 6683 6684 6685
{
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

6686
	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
6687
	if (!intel_dig_port)
6688
		return false;
6689

6690
	intel_connector = intel_connector_alloc();
S
Sudip Mukherjee 已提交
6691 6692
	if (!intel_connector)
		goto err_connector_alloc;
6693 6694 6695 6696

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

6697 6698 6699
	if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
			     &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
			     "DP %c", port_name(port)))
S
Sudip Mukherjee 已提交
6700
		goto err_encoder_init;
6701

6702
	intel_encoder->hotplug = intel_dp_hotplug;
6703
	intel_encoder->compute_config = intel_dp_compute_config;
P
Paulo Zanoni 已提交
6704
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
6705
	intel_encoder->get_config = intel_dp_get_config;
6706
	intel_encoder->suspend = intel_dp_encoder_suspend;
6707
	if (IS_CHERRYVIEW(dev_priv)) {
6708
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
6709 6710
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
6711
		intel_encoder->disable = vlv_disable_dp;
6712
		intel_encoder->post_disable = chv_post_disable_dp;
6713
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
6714
	} else if (IS_VALLEYVIEW(dev_priv)) {
6715
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
6716 6717
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
6718
		intel_encoder->disable = vlv_disable_dp;
6719
		intel_encoder->post_disable = vlv_post_disable_dp;
6720
	} else {
6721 6722
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
6723
		intel_encoder->disable = g4x_disable_dp;
6724
		intel_encoder->post_disable = g4x_post_disable_dp;
6725
	}
6726 6727

	intel_dig_port->dp.output_reg = output_reg;
6728
	intel_dig_port->max_lanes = 4;
6729

6730
	intel_encoder->type = INTEL_OUTPUT_DP;
6731
	intel_encoder->power_domain = intel_port_to_power_domain(port);
6732
	if (IS_CHERRYVIEW(dev_priv)) {
6733 6734 6735 6736 6737 6738 6739
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
6740
	intel_encoder->cloneable = 0;
6741
	intel_encoder->port = port;
6742

6743 6744
	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;

6745 6746 6747
	if (port != PORT_A)
		intel_infoframe_init(intel_dig_port);

6748
	intel_dig_port->aux_ch = intel_aux_ch(dev_priv, port);
S
Sudip Mukherjee 已提交
6749 6750 6751
	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

6752
	return true;
S
Sudip Mukherjee 已提交
6753 6754 6755

err_init_connector:
	drm_encoder_cleanup(encoder);
S
Sudip Mukherjee 已提交
6756
err_encoder_init:
S
Sudip Mukherjee 已提交
6757 6758 6759
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);
6760
	return false;
6761
}
void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
6764
{
6765 6766 6767 6768
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;
6769

6770 6771
		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;
6772

6773
		intel_dp = enc_to_intel_dp(&encoder->base);
6774

6775
		if (!intel_dp->can_mst)
6776 6777
			continue;

6778 6779
		if (intel_dp->is_mst)
			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
6780 6781 6782
	}
}
void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
6784
{
6785
	struct intel_encoder *encoder;
6786

6787 6788
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;
6789
		int ret;
6790

6791 6792 6793 6794 6795 6796
		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);

		if (!intel_dp->can_mst)
6797
			continue;
6798

6799
		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
6800
		if (ret)
6801
			intel_dp_check_mst_status(intel_dp);
6802 6803
	}
}