/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"

#define DP_DPRX_ESI_LEN 14

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = 1/(0.972261) */
#define DP_DSC_FEC_OVERHEAD_FACTOR		972261

/* Compliance test status bits  */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below we only provide the fixed rates and exclude the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming the fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};
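
/*
 * For illustration, decoding the 162000 entry above: m2 = 0x819999a, so
 * m2_int = 0x819999a >> 22 = 32 and m2_fraction = 0x819999a & 0x3fffff =
 * 1677722, giving an effective divider of roughly 32 + 1677722 / 2^22,
 * i.e. about 32.4.
 */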
/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With a single pipe configuration, the HW is capable of supporting a
 * maximum of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 *
 * This function is not safe to use prior to encoder type being set.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	return dig_port->base.type == INTEL_OUTPUT_EDP;
}

static void intel_dp_link_down(struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;

	if (drm_dp_has_quirk(&intel_dp->desc, 0,
			     DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
		/* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
		static const int quirk_rates[] = { 162000, 270000, 324000 };

		memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
		intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);

		return;
	}

	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);

	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	intel_dp->num_sink_rates = i;
}

/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < len; i++) {
		if (rates[len - i - 1] <= max_rate)
			return len - i;
	}

	return 0;
}

/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}
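
/*
 * For example: with rates[] = { 162000, 270000, 540000 } and
 * max_rate == 300000, intel_dp_rate_limit_len() scans from the high end,
 * finds 270000 <= 300000 at index len - i - 1 == 1 and returns 2, i.e.
 * only the first two entries remain usable.
 */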

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	int source_max = dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
	int fia_max = intel_tc_port_fia_max_lane_count(dig_port);

	return min3(source_max, sink_max, fia_max);
}

int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}

int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
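
/*
 * E.g. a 1920x1080@60 mode with a 148500 kHz pixel clock at 24 bpp needs
 * DIV_ROUND_UP(148500 * 24, 8) = 445500 kBps of link bandwidth, which is
 * then compared against intel_dp_max_data_rate() below.
 */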

int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since 8 bits of data
	 * are transmitted every LS_Clk per lane, there is no need to account
	 * for the channel encoding that is done in the PHY layer here.
	 */

	return max_link_clock * max_lanes;
}
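
/*
 * E.g. HBR2 (5.4 Gbps, LS_Clk = 540000 kHz) over 4 lanes yields
 * 540000 * 4 = 2160000 kBps, comfortably above the 445500 kBps that the
 * 1080p60 24 bpp example above requires.
 */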

static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	int max_dotclk = dev_priv->max_dotclk_freq;
	int ds_max_dotclk;

	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

	if (type != DP_DS_PORT_TYPE_VGA)
		return max_dotclk;

	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
						    intel_dp->downstream_ports);

	if (ds_max_dotclk != 0)
		max_dotclk = min(max_dotclk, ds_max_dotclk);

	return max_dotclk;
}

static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}

static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_combo(dev_priv, phy) &&
	    !IS_ELKHARTLAKE(dev_priv) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}

static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(&dev_priv->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}
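
/*
 * For example, intersecting source rates { 162000, 270000, 540000 } with
 * sink rates { 162000, 270000, 324000 } walks both sorted arrays with the
 * two-pointer merge above and yields common_rates = { 162000, 270000 },
 * returning k == 2.
 */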

/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}

static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_WARN_ON(&i915->drm,
		    !intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}

static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}

static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
						     int link_rate,
						     u8 lane_count)
{
	const struct drm_display_mode *fixed_mode =
		intel_dp->attached_connector->panel.fixed_mode;
	int mode_rate, max_rate;

	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
	max_rate = intel_dp_max_data_rate(link_rate, lane_count);
	if (mode_rate > max_rate)
		return false;

	return true;
}

int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int index;

	/*
	 * TODO: Enable fallback on MST links once MST link compute can handle
	 * the fallback params.
	 */
	if (intel_dp->is_mst) {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}

u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, 1000000U),
		       DP_DSC_FEC_OVERHEAD_FACTOR);
}
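
/*
 * E.g. for a 148500 kHz mode clock this evaluates to
 * 148500 * 1000000 / 972261, roughly 152736 kHz: the ~2.85% FEC overhead
 * is folded into the effective pixel rate.
 */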

static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11)
		return 7680 * 8;
	else
		return 6144 * 8;
}

static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
				       u32 link_clock, u32 lane_count,
				       u32 mode_clock, u32 mode_hdisplay)
{
	u32 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
	 * for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8) /
			 intel_dp_mode_to_fec_clock(mode_clock);
	drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
		mode_hdisplay;
	drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
		    max_bpp_small_joiner_ram);

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
			    bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* Find the nearest match in the array of known BPPs from VESA */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
		if (bits_per_pixel < valid_dsc_bpp[i + 1])
			break;
	}
	bits_per_pixel = valid_dsc_bpp[i];

	/*
	 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
	 * fractional part is 0
	 */
	return bits_per_pixel << 4;
}
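
/*
 * The U6.4 return format above means e.g. a computed 12 bpp comes back as
 * 12 << 4 == 192; callers shift right by 4 to recover the integer bpp, as
 * intel_dp_mode_valid() does below.
 */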

static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				       int mode_clock, int mode_hdisplay)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 min_slice_count, i;
	int max_slice_width;

	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		drm_dbg_kms(&i915->drm,
			    "Unsupported slice width %d by DP DSC Sink device\n",
			    max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = min_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		if (valid_dsc_slicecount[i] >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
						    false))
			break;
		if (min_slice_count  <= valid_dsc_slicecount[i])
			return valid_dsc_slicecount[i];
	}

	drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
		    min_slice_count);
	return 0;
}
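
/*
 * For instance, a 4k mode with a 533250 kHz pixel clock is below
 * DP_DSC_PEAK_PIXEL_RATE, so min_slice_count = DIV_ROUND_UP(533250,
 * 340000) = 2, which the loop above then rounds up to the nearest
 * supported value in valid_dsc_slicecount[].
 */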

static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
				  int hdisplay)
{
	/*
	 * Older platforms don't like hdisplay==4096 with DP.
	 *
	 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
	 * and frame counter increment), but we don't get vblank interrupts,
	 * and the pipe underruns immediately. The link also doesn't seem
	 * to get trained properly.
	 *
	 * On CHV the vblank interrupts don't seem to disappear but
	 * otherwise the symptoms are similar.
	 *
	 * TODO: confirm the behaviour on HSW+
	 */
	return hdisplay == 4096 && !HAS_DDI(dev_priv);
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(dev_priv,
							    max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay);
		}
	}

	if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
	    target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return intel_mode_valid_max_plane_size(dev_priv, mode);
}

u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}
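
/*
 * E.g. packing the three bytes { 0x11, 0x22, 0x33 } yields 0x11223300:
 * the first byte lands in the most significant position, matching the
 * big-endian layout of the AUX data registers. intel_dp_unpack_aux()
 * below is the exact inverse.
 */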

static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd);
static void
intel_dp_pps_init(struct intel_dp *intel_dp);

static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}

static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
				wakeref);
	return 0;
}

#define with_pps_lock(dp, wf) \
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
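
/*
 * A typical use of the helper above, assuming pps_lock() returns a
 * non-zero wakeref:
 *
 *	intel_wakeref_t wakeref;
 *
 *	with_pps_lock(intel_dp, wakeref) {
 *		... code that needs the panel power sequencer ...
 *	}
 *
 * pps_unlock() returns 0, which terminates the for loop after one pass.
 */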

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	if (drm_WARN(&dev_priv->drm,
		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
		     pipe_name(pipe), dig_port->base.base.base.id,
		     dig_port->base.base.name))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(pipe), dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			drm_err(&dev_priv->drm,
				"Failed to force on pll for pipe %c!\n",
				pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			drm_WARN_ON(&dev_priv->drm,
				    intel_dp->active_pipe != INVALID_PIPE &&
				    intel_dp->active_pipe !=
				    intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			drm_WARN_ON(&dev_priv->drm,
				    intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}
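
/*
 * pipes is a bitmask: e.g. if only PIPE_A's sequencer is taken, pipes ==
 * 0b10 and ffs(pipes) - 1 == PIPE_B.
 */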

892 893 894
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
895
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
896
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
897
	enum pipe pipe;
898

V
Ville Syrjälä 已提交
899
	lockdep_assert_held(&dev_priv->pps_mutex);
900

901
	/* We should never land here with regular DP ports */
902
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
903

904 905
	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
		    intel_dp->active_pipe != intel_dp->pps_pipe);
906

907 908 909
	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

910
	pipe = vlv_find_free_pps(dev_priv);
911 912 913 914 915

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
916
	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
917
		pipe = PIPE_A;
918

919
	vlv_steal_power_sequencer(dev_priv, pipe);
920
	intel_dp->pps_pipe = pipe;
921

922 923 924
	drm_dbg_kms(&dev_priv->drm,
		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps_pipe),
925 926
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);
927 928

	/* init power sequencer on this pipe and port */
929 930
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
931

932 933 934 935 936
	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);
937 938 939 940

	return intel_dp->pps_pipe;
}

static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int backlight_controller = dev_priv->vbt.backlight.controller;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps_reset)
		return backlight_controller;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been setup during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

	return backlight_controller;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum port port = dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		drm_dbg_kms(&dev_priv->drm,
			    "no initial power sequencer for [ENCODER:%d:%s]\n",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}

void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (drm_WARN_ON(&dev_priv->drm,
			!(IS_VALLEYVIEW(dev_priv) ||
			  IS_CHERRYVIEW(dev_priv) ||
			  IS_GEN9_LP(dev_priv))))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN_ON(&dev_priv->drm,
			    intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}

struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);

	/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
	if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		regs->pp_div = INVALID_MMIO_REG;
	else
		regs->pp_div = PP_DIVISOR(pps_idx);
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	return regs.pp_stat;
}

/* Reboot notifier handler to shut down panel power to guarantee T12 timing.
   This function is only applicable when panel PM state is not to be tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
			i915_reg_t pp_ctrl_reg, pp_div_reg;
			u32 pp_div;

			pp_ctrl_reg = PP_CONTROL(pipe);
			pp_div_reg  = PP_DIVISOR(pipe);
			pp_div = intel_de_read(dev_priv, pp_div_reg);
			pp_div &= PP_REFERENCE_DIVIDER_MASK;

			/* 0x1F write to PP_DIV_REG sets max cycle delay */
			intel_de_write(dev_priv, pp_div_reg, pp_div | 0x1F);
			intel_de_write(dev_priv, pp_ctrl_reg,
				       PANEL_UNLOCK_REGS);
			msleep(intel_dp->panel_power_cycle_delay);
		}
	}

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		drm_WARN(&dev_priv->drm, 1,
			 "eDP powered off while attempting aux channel communication.\n");
		drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
			    intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
			    intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
	}
}

static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(timeout_ms));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		drm_err(&i915->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);
#undef C

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz.  So, take the hrawclk value and divide by 2000 and use that.
	 */
	return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
}
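
/*
 * E.g. assuming a 200 MHz hrawclk (rawclk_freq == 200000, in kHz), the
 * divider comes out as 200000 / 2000 == 100, giving the desired 2 MHz
 * AUX bit clock.
 */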

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 freq;

	if (index)
		return 0;

	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		freq = dev_priv->cdclk.hw.cdclk;
	else
		freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
	return DIV_ROUND_CLOSEST(freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 */
	return index ? 0 : 1;
}

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(dig_port->base.base.dev);
	u32 precharge, timeout;

	if (IS_GEN(dev_priv, 6))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev_priv))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	u32 ret;

	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

	if (intel_phy_is_tc(i915, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT)
		ret |= DP_AUX_CH_CTL_TBT_IO;

	return ret;
}

static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	bool is_tc_port = intel_phy_is_tc(i915, phy);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain;
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	if (is_tc_port)
		intel_tc_port_lock(dig_port);

	aux_domain = intel_aux_power_domain(dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	cpu_latency_qos_update_request(&i915->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(&i915->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(&i915->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	if (is_tc_port)
		intel_tc_port_unlock(dig_port);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}
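
/*
 * E.g. a native AUX read (request 0x9) of one byte at DPCD address 0x00000
 * encodes as txbuf[] = { 0x90, 0x00, 0x00, 0x00 }: the request in the high
 * nibble of byte 0, the 20-bit address across the low nibble of byte 0
 * plus bytes 1-2, and size - 1 in byte 3.
 */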

static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (drm_WARN_ON(&i915->drm, txsize > 20))
			return -E2BIG;

		drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (drm_WARN_ON(&i915->drm, rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
	case AUX_CH_G:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
	case AUX_CH_G:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(intel_dp->aux.name);
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/port %c",
				       aux_ch_name(dig_port->aux_ch),
				       port_name(encoder->port));
	intel_dp->aux.transfer = intel_dp_aux_transfer;
}

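/*
 * The source rate table is sorted ascending, so its last entry is the
 * highest link rate the source supports: HBR2 is a 540000 kHz link
 * clock (5.4 GT/s), HBR3 is 810000 kHz (8.1 GT/s).
 */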
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 540000;
}

bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
{
	int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];

	return max_rate >= 810000;
}

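/*
 * Pre-DDI platforms use fixed DPLL divider values per link rate; pick
 * the table entry matching the requested port clock and mark the DPLL
 * state as already computed.
 */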
static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev_priv)) {
		divisor = g4x_dpll;
		count = ARRAY_SIZE(g4x_dpll);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

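/*
 * Print the given rate array into str as a comma-separated list,
 * stopping quietly once the buffer would overflow.
 */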
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	char str[128]; /* FIXME: too big for stack? */

	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	drm_dbg_kms(&i915->drm, "source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int len;

	len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
	if (drm_WARN_ON(&i915->drm, len <= 0))
		return 162000;

	return intel_dp->common_rates[len - 1];
}

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int i = intel_dp_rate_index(intel_dp->sink_rates,
				    intel_dp->num_sink_rates, rate);

	if (drm_WARN_ON(&i915->drm, i < 0))
		i = 0;

	return i;
}

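/*
 * Program either the DP_LINK_BW_SET bandwidth code or, for eDP 1.4
 * sinks using the rate select method, an index into the sink's link
 * rate table; exactly one of the two is used, the other stays zero.
 */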
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   u8 *link_bw, u8 *rate_select)
{
	/* eDP 1.4 rate select method. */
	if (intel_dp->use_rate_select) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}

static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* On TGL, FEC is supported on all Pipes */
	if (INTEL_GEN(dev_priv) >= 12)
		return true;

	if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
		return true;

	return false;
}

static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *pipe_config)
{
	return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
		drm_dp_sink_supports_fec(intel_dp->fec_capable);
}

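/*
 * On a regular DP link DSC has to ride on top of FEC, hence the
 * fec_enable requirement for non-eDP; eDP carries DSC without FEC.
 */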
static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	if (!intel_dp_is_edp(intel_dp) && !crtc_state->fec_enable)
		return false;

	return intel_dsc_source_support(encoder, crtc_state) &&
		drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
}

static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpp = pipe_config->pipe_bpp;
	bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);

	if (bpc > 0)
		bpp = min(bpp, 3*bpc);

	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that don't have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
			drm_dbg_kms(&dev_priv->drm,
				    "clamping bpp for eDP panel to BIOS-provided %i\n",
				    dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}
	}

	return bpp;
}

/* Adjust link config limits based on compliance test requests. */
void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		limits->min_bpp = limits->max_bpp = bpp;
		pipe_config->dither_force_disable = bpp == 6 * 3;

		drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			if (index >= 0)
				limits->min_clock = limits->max_clock = index;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}

static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp)
{
	/*
	 * The bpp value was computed assuming RGB output. For YCbCr 4:2:0
	 * output, the effective number of bits per pixel on the link is
	 * half that of RGB, so halve it here.
	 */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		bpp /= 2;

	return bpp;
}

/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int bpp, clock, lane_count;
	int mode_rate, link_clock, link_avail;

	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		int output_bpp = intel_dp_output_bpp(pipe_config, bpp);

		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   output_bpp);

		for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
			for (lane_count = limits->min_lane_count;
			     lane_count <= limits->max_lane_count;
			     lane_count <<= 1) {
				link_clock = intel_dp->common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return 0;
				}
			}
		}
	}

	return -EINVAL;
}

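/*
 * Pick a sink-supported DSC input bpc that does not exceed dsc_max_bpc
 * and return it as bpp (3 * bpc); 0 means no supported bpc fits.
 */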
static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
{
	int i, num_bpc;
	u8 dsc_bpc[3] = {0};

	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
						       dsc_bpc);
	for (i = 0; i < num_bpc; i++) {
		if (dsc_max_bpc >= dsc_bpc[i])
			return dsc_bpc[i] * 3;
	}

	return 0;
}

#define DSC_SUPPORTED_VERSION_MIN		1

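/*
 * Fill vdsc_cfg from the sink's DSC DPCD capabilities (slice height,
 * DSC version, line buffer depth, block prediction support) and derive
 * the rate control parameters.
 */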
static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	u8 line_buf_depth;
	int ret;

	ret = intel_dsc_compute_params(encoder, crtc_state);
	if (ret)
		return ret;

	/*
	 * Slice Height of 8 works for all currently available panels. So start
	 * with that if pic_height is an integral multiple of 8. Eventually add
	 * logic to try multiple slice heights.
	 */
	if (vdsc_cfg->pic_height % 8 == 0)
		vdsc_cfg->slice_height = 8;
	else if (vdsc_cfg->pic_height % 4 == 0)
		vdsc_cfg->slice_height = 4;
	else
		vdsc_cfg->slice_height = 2;

	vdsc_cfg->dsc_version_major =
		(intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
	vdsc_cfg->dsc_version_minor =
		min(DSC_SUPPORTED_VERSION_MIN,
		    (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		     DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);

	vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
		DP_DSC_RGB;

	line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
	if (!line_buf_depth) {
		drm_dbg_kms(&i915->drm,
			    "DSC Sink Line Buffer Depth invalid\n");
		return -EINVAL;
	}

	if (vdsc_cfg->dsc_version_minor == 2)
		vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
	else
		vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;

	vdsc_cfg->block_pred_enable =
		intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
		DP_DSC_BLK_PREDICTION_IS_SUPPORTED;

	return drm_dsc_compute_rc_parameters(vdsc_cfg);
}

static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state,
				       struct link_config_limits *limits)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	u8 dsc_max_bpc;
	int pipe_bpp;
	int ret;

	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
		intel_dp_supports_fec(intel_dp, pipe_config);

	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
		return -EINVAL;

	/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
	if (INTEL_GEN(dev_priv) >= 12)
		dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
	else
		dsc_max_bpc = min_t(u8, 10,
				    conn_state->max_requested_bpc);

	pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);

	/* Min Input BPC for ICL+ is 8 */
	if (pipe_bpp < 8 * 3) {
		drm_dbg_kms(&dev_priv->drm,
			    "No DSC support for less than 8bpc\n");
		return -EINVAL;
	}

	/*
	 * For now enable DSC for max bpp, max link rate, max lane count.
	 * Optimize this later for the minimum possible link rate/lane count
	 * with DSC enabled for the requested mode.
	 */
	pipe_config->pipe_bpp = pipe_bpp;
	pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
	pipe_config->lane_count = limits->max_lane_count;

	if (intel_dp_is_edp(intel_dp)) {
		pipe_config->dsc.compressed_bpp =
			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
			      pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count =
			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
							true);
	} else {
		u16 dsc_max_output_bpp;
		u8 dsc_dp_slice_count;

		dsc_max_output_bpp =
			intel_dp_dsc_get_output_bpp(dev_priv,
						    pipe_config->port_clock,
						    pipe_config->lane_count,
						    adjusted_mode->crtc_clock,
						    adjusted_mode->crtc_hdisplay);
		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(intel_dp,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay);
		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
			drm_dbg_kms(&dev_priv->drm,
				    "Compressed BPP/Slice Count not supported\n");
			return -EINVAL;
		}
		pipe_config->dsc.compressed_bpp = min_t(u16,
							dsc_max_output_bpp >> 4,
							pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count = dsc_dp_slice_count;
	}
	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is even
	 * then we need to use 2 VDSC instances.
	 */
	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
		if (pipe_config->dsc.slice_count > 1) {
			pipe_config->dsc.dsc_split = true;
		} else {
			drm_dbg_kms(&dev_priv->drm,
				    "Cannot split stream to use 2 VDSC instances\n");
			return -EINVAL;
		}
	}

	ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
	if (ret < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Cannot compute valid DSC parameters for Input Bpp = %d "
			    "Compressed BPP = %d\n",
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);
		return ret;
	}

	pipe_config->dsc.compression_enable = true;
	drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
		    "Compressed Bpp = %d Slice Count = %d\n",
		    pipe_config->pipe_bpp,
		    pipe_config->dsc.compressed_bpp,
		    pipe_config->dsc.slice_count);

	return 0;
}

int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
		return 6 * 3;
	else
		return 8 * 3;
}

static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config,
			     struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct link_config_limits limits;
	int common_len;
	int ret;

	common_len = intel_dp_common_len_rate_limit(intel_dp,
						    intel_dp->max_link_rate);

	/* No common link rates between source and sink */
	drm_WARN_ON(encoder->base.dev, common_len <= 0);

	limits.min_clock = 0;
	limits.max_clock = common_len - 1;

	limits.min_lane_count = 1;
	limits.max_lane_count = intel_dp_max_lane_count(intel_dp);

	limits.min_bpp = intel_dp_min_bpp(pipe_config);
	limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);

	if (intel_dp_is_edp(intel_dp)) {
		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		limits.min_lane_count = limits.max_lane_count;
		limits.min_clock = limits.max_clock;
	}

	intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);

	drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
		    "max rate %d max bpp %d pixel clock %iKHz\n",
		    limits.max_lane_count,
		    intel_dp->common_rates[limits.max_clock],
		    limits.max_bpp, adjusted_mode->crtc_clock);

	/*
	 * Optimize for slow and wide. This is the place to add alternative
	 * optimization policy.
	 */
	ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);

	/* Enable compression if the mode doesn't fit the available BW. */
	drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
	if (ret || intel_dp->force_dsc_en) {
		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits);
		if (ret < 0)
			return ret;
	}

	if (pipe_config->dsc.compression_enable) {
		drm_dbg_kms(&i915->drm,
			    "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   pipe_config->dsc.compressed_bpp),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	} else {
		drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp);

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   pipe_config->pipe_bpp),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	}
	return 0;
}

static int
intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
			 struct intel_crtc_state *crtc_state,
			 const struct drm_connector_state *conn_state)
{
	struct drm_connector *connector = conn_state->connector;
	const struct drm_display_info *info = &connector->display_info;
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	if (!drm_mode_is_420_only(info, adjusted_mode) ||
	    !intel_dp_get_colorimetry_status(intel_dp) ||
	    !connector->ycbcr_420_allowed)
		return 0;

	crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;

	return intel_pch_panel_fitting(crtc_state, conn_state);
}

bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state)
{
	const struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Our YCbCr output is always limited range.
	 * crtc_state->limited_color_range only applies to RGB,
	 * and it must never be set for YCbCr or we risk setting
	 * some conflicting bits in PIPECONF which will mess up
	 * the colors on the monitor.
	 */
	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		return false;

	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		return crtc_state->pipe_bpp != 18 &&
			drm_default_rgb_quant_range(adjusted_mode) ==
			HDMI_QUANTIZATION_RANGE_LIMITED;
	} else {
		return intel_conn_state->broadcast_rgb ==
			INTEL_BROADCAST_RGB_LIMITED;
	}
}

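/* G4x has no DP audio; before gen 12, port A (eDP) has no audio either. */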
static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
				    enum port port)
{
	if (IS_G4X(dev_priv))
		return false;
	if (INTEL_GEN(dev_priv) < 12 && port == PORT_A)
		return false;

	return true;
}

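/*
 * Translate the computed output format and the connector's colorspace
 * property into the VSC SDP pixel encoding/colorimetry fields.
 */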
static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
					     const struct drm_connector_state *conn_state,
					     struct drm_dp_vsc_sdp *vsc)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
	 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
	 * Colorimetry Format indication.
	 */
	vsc->revision = 0x5;
	vsc->length = 0x13;

	/* DP 1.4a spec, Table 2-120 */
	switch (crtc_state->output_format) {
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		vsc->pixelformat = DP_PIXELFORMAT_YUV444;
		break;
	case INTEL_OUTPUT_FORMAT_YCBCR420:
		vsc->pixelformat = DP_PIXELFORMAT_YUV420;
		break;
	case INTEL_OUTPUT_FORMAT_RGB:
	default:
		vsc->pixelformat = DP_PIXELFORMAT_RGB;
	}

	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_BT709_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_709:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
		break;
	case DRM_MODE_COLORIMETRY_SYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_OPYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
		break;
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
		vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
		break;
	default:
		/*
		 * RGB->YCBCR color conversion uses the BT.709
		 * color space.
		 */
		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
			vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		else
			vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
		break;
	}

	vsc->bpc = crtc_state->pipe_bpp / 3;

	/* only RGB pixelformat supports 6 bpc */
	drm_WARN_ON(&dev_priv->drm,
		    vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);

	/* all YCbCr are always limited range */
	vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
	vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
}

static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
				     struct intel_crtc_state *crtc_state,
				     const struct drm_connector_state *conn_state)
{
	struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;

	/* When a crtc state has PSR, VSC SDP will be handled by PSR routine */
	if (crtc_state->has_psr)
		return;

	if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
		return;

	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
	vsc->sdp_type = DP_SDP_VSC;
	intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
					 &crtc_state->infoframes.vsc);
}

void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state,
				  struct drm_dp_vsc_sdp *vsc)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	vsc->sdp_type = DP_SDP_VSC;

	if (dev_priv->psr.psr2_enabled) {
		if (dev_priv->psr.colorimetry_support &&
		    intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
			/* [PSR2, +Colorimetry] */
			intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
							 vsc);
		} else {
			/*
			 * [PSR2, -Colorimetry]
			 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
			 * 3D stereo + PSR/PSR2 + Y-coordinate.
			 */
			vsc->revision = 0x4;
			vsc->length = 0xe;
		}
	} else {
		/*
		 * [PSR1]
		 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
		 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
		 * higher).
		 */
		vsc->revision = 0x2;
		vsc->length = 0x8;
	}
}

static void
intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
					    struct intel_crtc_state *crtc_state,
					    const struct drm_connector_state *conn_state)
{
	int ret;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;

	if (!conn_state->hdr_output_metadata)
		return;

	ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);

	if (ret) {
		drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
		return;
	}

	crtc_state->infoframes.enable |=
		intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
}

int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
	enum port port = encoder->port;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
					   DP_DPCD_QUIRK_CONSTANT_N);
	int ret = 0, output_bpp;

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;

	if (lspcon->active)
		lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
	else
		ret = intel_dp_ycbcr420_config(intel_dp, pipe_config,
					       conn_state);
	if (ret)
		return ret;

	pipe_config->has_drrs = false;
	if (!intel_dp_port_has_audio(dev_priv, port))
		pipe_config->has_audio = false;
	else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		pipe_config->has_audio = intel_dp->has_audio;
	else
		pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;

	if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (HAS_GMCH(dev_priv))
			ret = intel_gmch_panel_fitting(pipe_config, conn_state);
		else
			ret = intel_pch_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (HAS_GMCH(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
		return -EINVAL;

	ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
	if (ret < 0)
		return ret;

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	if (pipe_config->dsc.compression_enable)
		output_bpp = pipe_config->dsc.compressed_bpp;
	else
		output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp);

	intel_link_compute_m_n(output_bpp,
			       pipe_config->lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n,
			       constant_n, pipe_config->fec_enable);

	if (intel_connector->panel.downclock_mode != NULL &&
		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
			pipe_config->has_drrs = true;
			intel_link_compute_m_n(output_bpp,
					       pipe_config->lane_count,
					       intel_connector->panel.downclock_mode->clock,
					       pipe_config->port_clock,
					       &pipe_config->dp_m2_n2,
					       constant_n, pipe_config->fec_enable);
	}

	if (!HAS_DDI(dev_priv))
		intel_dp_set_clock(encoder, pipe_config);

	intel_psr_compute_config(intel_dp, pipe_config);
	intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
	intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);

	return 0;
}

void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      int link_rate, u8 lane_count,
			      bool link_mst)
{
	intel_dp->link_trained = false;
	intel_dp->link_rate = link_rate;
	intel_dp->lane_count = lane_count;
	intel_dp->link_mst = link_mst;
}

static void intel_dp_prepare(struct intel_encoder *encoder,
			     const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;

	intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
				 pipe_config->lane_count,
				 intel_crtc_has_type(pipe_config,
						     INTEL_OUTPUT_DP_MST));

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ilk_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev_priv))
			intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
		else
			intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
	}
}

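/*
 * PP_STATUS mask/value pairs polled via wait_panel_status() to detect
 * the panel power sequencer reaching the on, off and cycle-idle states.
 */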
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

static void intel_pps_verify_state(struct intel_dp *intel_dp);

static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_verify_state(intel_dp);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	drm_dbg_kms(&dev_priv->drm,
		    "mask %08x value %08x status %08x control %08x\n",
		    mask, value,
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));

	if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
				       mask, value, 5000))
		drm_err(&dev_priv->drm,
			"Panel status timeout: status %08x control %08x\n",
			intel_de_read(dev_priv, pp_stat_reg),
			intel_de_read(dev_priv, pp_ctrl_reg));

	drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
}

static void wait_panel_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void wait_panel_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration;

	drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");

	/* take the difference of current time and panel power off time
	 * and then make panel wait for t11_t12 if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
				       intel_dp->panel_power_cycle_delay - panel_power_off_duration);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}

static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}

static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}

/* Read the current pp_control value, unlocking the register if it
 * is locked
 */
static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
	if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
			(control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}

/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return false;

	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	intel_display_power_get(dev_priv,
				intel_aux_power_domain(dig_port));

	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ilk_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);
	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] panel power wasn't enabled\n",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}

/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;
	bool vdd;

	if (!intel_dp_is_edp(intel_dp))
		return;

	vdd = false;
	with_pps_lock(intel_dp, wakeref)
		vdd = edp_panel_vdd_on(intel_dp);
	I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
			dp_to_dig_port(intel_dp)->base.base.base.id,
			dp_to_dig_port(intel_dp)->base.base.name);
}

static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port =
		dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	pp = ilk_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));

	if ((pp & PANEL_POWER_ON) == 0)
		intel_dp->panel_power_off_time = ktime_get_boottime();

	intel_display_power_put_unchecked(dev_priv,
					  intel_aux_power_domain(dig_port));
}

static void edp_panel_vdd_work(struct work_struct *__work)
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp =
		container_of(to_delayed_work(__work),
			     struct intel_dp, panel_vdd_work);
	intel_wakeref_t wakeref;

	with_pps_lock(intel_dp, wakeref) {
		if (!intel_dp->want_panel_vdd)
			edp_panel_vdd_off_sync(intel_dp);
	}
}

static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}

/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
			dp_to_dig_port(intel_dp)->base.base.base.id,
			dp_to_dig_port(intel_dp)->base.base.name);

	intel_dp->want_panel_vdd = false;

	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}

static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
		    dp_to_dig_port(intel_dp)->base.base.base.id,
		    dp_to_dig_port(intel_dp)->base.base.name);

	if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
		     "[ENCODER:%d:%s] panel power already on\n",
		     dp_to_dig_port(intel_dp)->base.base.base.id,
		     dp_to_dig_port(intel_dp)->base.base.name))
		return;

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ilk_get_pp_control(intel_dp);
	if (IS_GEN(dev_priv, 5)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}

	pp |= PANEL_POWER_ON;
	if (!IS_GEN(dev_priv, 5))
		pp |= PANEL_POWER_RESET;

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	wait_panel_on(intel_dp);
	intel_dp->last_power_on = jiffies;

	if (IS_GEN(dev_priv, 5)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}
}

void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref)
		edp_panel_on(intel_dp);
}

static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
		    dig_port->base.base.base.id, dig_port->base.base.name);

	drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd,
		 "Need [ENCODER:%d:%s] VDD to turn off panel\n",
		 dig_port->base.base.base.id, dig_port->base.base.name);

	pp = ilk_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	wait_panel_off(intel_dp);
	intel_dp->panel_power_off_time = ktime_get_boottime();

	/* We got a reference when we enabled the VDD. */
	intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
}

void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref)
		edp_panel_off(intel_dp);
}

/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	with_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		pp = ilk_get_pp_control(intel_dp);
		pp |= EDP_BLC_ENABLE;

		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}
}

/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&i915->drm, "\n");

	intel_panel_enable_backlight(crtc_state, conn_state);
	_intel_edp_backlight_on(intel_dp);
}

/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		pp = ilk_get_pp_control(intel_dp);
		pp &= ~EDP_BLC_ENABLE;

		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}

	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}

/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&i915->drm, "\n");

	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(old_conn_state);
}

/*
 * Hook for controlling the panel power control backlight through the bl_power
 * sysfs attribute. Take care to handle multiple calls.
 */
static void intel_edp_backlight_power(struct intel_connector *connector,
				      bool enable)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	intel_wakeref_t wakeref;
	bool is_enabled;

	is_enabled = false;
	with_pps_lock(intel_dp, wakeref)
		is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
	if (is_enabled == enable)
		return;

	drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
		    enable ? "enable" : "disable");

	if (enable)
		_intel_edp_backlight_on(intel_dp);
	else
		_intel_edp_backlight_off(intel_dp);
}

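/*
 * State assertions used by the ilk eDP PLL enable/disable paths below:
 * they check the DP port enable bit and the eDP PLL enable bit.
 */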
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN;

	I915_STATE_WARN(cur_state != state,
			"[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
			dig_port->base.base.base.id, dig_port->base.base.name,
			onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)

static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
	bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE;

	I915_STATE_WARN(cur_state != state,
			"eDP PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)

static void ilk_edp_pll_on(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n",
		    pipe_config->port_clock);

	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	if (pipe_config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	intel_de_write(dev_priv, DP_A, intel_dp->DP);
	intel_de_posting_read(dev_priv, DP_A);
	udelay(500);

	/*
	 * [DevILK] Work around required when enabling DP PLL
	 * while a pipe is enabled going to FDI:
	 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
	 * 2. Program DP PLL enable
	 */
	if (IS_GEN(dev_priv, 5))
		intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);

	intel_dp->DP |= DP_PLL_ENABLE;

	intel_de_write(dev_priv, DP_A, intel_dp->DP);
	intel_de_posting_read(dev_priv, DP_A);
	udelay(200);
}

static void ilk_edp_pll_off(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	intel_de_write(dev_priv, DP_A, intel_dp->DP);
	intel_de_posting_read(dev_priv, DP_A);
	udelay(200);
}

static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
{
	/*
	 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
	 * be capable of signalling downstream hpd with a long pulse.
	 * Whether or not that means D3 is safe to use is not clear,
	 * but let's assume so until proven otherwise.
	 *
	 * FIXME should really check all downstream ports...
	 */
	return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
3375
		drm_dp_is_branch(intel_dp->dpcd) &&
3376 3377 3378
		intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}

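/*
 * Ask the sink to enable/disable its DSC decompressor. This is a no-op
 * unless the computed crtc state actually has compression enabled.
 */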
void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state,
					   bool enable)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int ret;

	if (!crtc_state->dsc.compression_enable)
		return;

	ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
				 enable ? DP_DECOMPRESSION_EN : 0);
	if (ret < 0)
		drm_dbg_kms(&i915->drm,
			    "Failed to %s sink decompression state\n",
			    enable ? "enable" : "disable");
}

/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
					 DP_SET_POWER_D3);
	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
						 DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}

		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	if (ret != 1)
		drm_dbg_kms(&i915->drm, "failed to %s sink power state\n",
			    mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}

static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
				 enum port port, enum pipe *pipe)
{
	enum pipe p;

	for_each_pipe(dev_priv, p) {
		u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p));

		if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
			*pipe = p;
			return true;
		}
	}

	drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n",
		    port_name(port));

	/* must initialize pipe to something for the asserts */
	*pipe = PIPE_A;

	return false;
}

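/*
 * Report whether the DP port is enabled and which pipe it is attached to.
 * *pipe is filled in even when the port is disabled, since the state
 * asserts still want to know which pipe to look at.
 */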
bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
			   i915_reg_t dp_reg, enum port port,
			   enum pipe *pipe)
{
	bool ret;
	u32 val;

	val = intel_de_read(dev_priv, dp_reg);

	ret = val & DP_PORT_EN;

	/* asserts want to know the pipe even if the port is disabled */
	if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		*pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
	else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
		ret &= cpt_dp_port_selected(dev_priv, port, pipe);
	else if (IS_CHERRYVIEW(dev_priv))
		*pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
	else
		*pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;

	return ret;
}

static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	intel_wakeref_t wakeref;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     encoder->power_domain);
	if (!wakeref)
		return false;

	ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
				    encoder->port, pipe);

	intel_display_power_put(dev_priv, encoder->power_domain, wakeref);

	return ret;
}

static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	u32 tmp, flags = 0;
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);

	if (encoder->type == INTEL_OUTPUT_EDP)
		pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
	else
		pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);

	tmp = intel_de_read(dev_priv, intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		u32 trans_dp = intel_de_read(dev_priv,
					     TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->hw.adjusted_mode.flags |= flags;

	if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	if (port == PORT_A) {
		if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	pipe_config->hw.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(pipe_config->port_clock,
					 &pipe_config->dp_m_n);

	if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			    pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
	}
}

static void intel_disable_dp(struct intel_atomic_state *state,
			     struct intel_encoder *encoder,
			     const struct intel_crtc_state *old_crtc_state,
			     const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	intel_dp->link_trained = false;

	if (old_crtc_state->has_audio)
		intel_audio_codec_disable(encoder,
					  old_crtc_state, old_conn_state);

	/*
	 * Make sure the panel is off before trying to change the mode.
	 * But also ensure that we have vdd while we switch off the panel.
	 */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(old_conn_state);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);
}

static void g4x_disable_dp(struct intel_atomic_state *state,
			   struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
}

static void vlv_disable_dp(struct intel_atomic_state *state,
			   struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
}

static void g4x_post_disable_dp(struct intel_atomic_state *state,
				struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;

	/*
	 * Bspec does not list a specific disable sequence for g4x DP.
	 * Follow the ilk+ sequence (disable pipe before the port) for
	 * g4x DP as it does not suffer from underruns like the normal
	 * g4x modeset sequence (disable pipe after the port).
	 */
	intel_dp_link_down(encoder, old_crtc_state);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ilk_edp_pll_off(intel_dp, old_crtc_state);
}

static void vlv_post_disable_dp(struct intel_atomic_state *state,
				struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	intel_dp_link_down(encoder, old_crtc_state);
}

static void chv_post_disable_dp(struct intel_atomic_state *state,
				struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	intel_dp_link_down(encoder, old_crtc_state);

	vlv_dpio_get(dev_priv);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, old_crtc_state, true);

	vlv_dpio_put(dev_priv);
}

static void
cpt_set_link_train(struct intel_dp *intel_dp,
		   u8 dp_train_pat)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 *DP = &intel_dp->DP;

	*DP &= ~DP_LINK_TRAIN_MASK_CPT;

	switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
	case DP_TRAINING_PATTERN_DISABLE:
		*DP |= DP_LINK_TRAIN_OFF_CPT;
		break;
	case DP_TRAINING_PATTERN_1:
		*DP |= DP_LINK_TRAIN_PAT_1_CPT;
		break;
	case DP_TRAINING_PATTERN_2:
		*DP |= DP_LINK_TRAIN_PAT_2_CPT;
		break;
	case DP_TRAINING_PATTERN_3:
		drm_dbg_kms(&dev_priv->drm,
			    "TPS3 not supported, using TPS2 instead\n");
		*DP |= DP_LINK_TRAIN_PAT_2_CPT;
		break;
	}
	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

static void
g4x_set_link_train(struct intel_dp *intel_dp,
		   u8 dp_train_pat)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 *DP = &intel_dp->DP;

	*DP &= ~DP_LINK_TRAIN_MASK;

	switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
	case DP_TRAINING_PATTERN_DISABLE:
		*DP |= DP_LINK_TRAIN_OFF;
		break;
	case DP_TRAINING_PATTERN_1:
		*DP |= DP_LINK_TRAIN_PAT_1;
		break;
	case DP_TRAINING_PATTERN_2:
		*DP |= DP_LINK_TRAIN_PAT_2;
		break;
	case DP_TRAINING_PATTERN_3:
		drm_dbg_kms(&dev_priv->drm,
			    "TPS3 not supported, using TPS2 instead\n");
		*DP |= DP_LINK_TRAIN_PAT_2;
		break;
	}

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

static void intel_dp_enable_port(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* enable with pattern 1 (as per spec) */
	intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
	if (old_crtc_state->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

static void intel_enable_dp(struct intel_atomic_state *state,
			    struct intel_encoder *encoder,
			    const struct intel_crtc_state *pipe_config,
			    const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg);
	enum pipe pipe = crtc->pipe;
	intel_wakeref_t wakeref;

	if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
		return;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			vlv_init_panel_power_sequencer(encoder, pipe_config);

		intel_dp_enable_port(intel_dp, pipe_config);

		edp_panel_vdd_on(intel_dp);
		edp_panel_on(intel_dp);
		edp_panel_vdd_off(intel_dp, true);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		unsigned int lane_mask = 0x0;

		if (IS_CHERRYVIEW(dev_priv))
			lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (pipe_config->has_audio) {
		drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n",
			pipe_name(pipe));
		intel_audio_codec_enable(encoder, pipe_config, conn_state);
	}
}

static void g4x_enable_dp(struct intel_atomic_state *state,
			  struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_enable_dp(state, encoder, pipe_config, conn_state);
	intel_edp_backlight_on(pipe_config, conn_state);
}

static void vlv_enable_dp(struct intel_atomic_state *state,
			  struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_edp_backlight_on(pipe_config, conn_state);
}

static void g4x_pre_enable_dp(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;

	intel_dp_prepare(encoder, pipe_config);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ilk_edp_pll_on(intel_dp, pipe_config);
}

static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);

	if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
		return;

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's always clear the
	 * port select when logically disconnecting a power sequencer
	 * from a port.
	 */
	drm_dbg_kms(&dev_priv->drm,
		    "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
		    pipe_name(pipe), dig_port->base.base.base.id,
		    dig_port->base.base.name);
	intel_de_write(dev_priv, pp_on_reg, 0);
	intel_de_posting_read(dev_priv, pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}

static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe,
			 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
			 pipe_name(pipe), encoder->base.base.id,
			 encoder->base.name);

		if (intel_dp->pps_pipe != pipe)
			continue;

		drm_dbg_kms(&dev_priv->drm,
			    "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
			    pipe_name(pipe), encoder->base.base.id,
			    encoder->base.name);

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}

static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);

	if (intel_dp->pps_pipe != INVALID_PIPE &&
	    intel_dp->pps_pipe != crtc->pipe) {
		/*
		 * If another power sequencer was being used on this
		 * port previously make sure to turn off vdd there while
		 * we still have control of it.
		 */
		vlv_detach_power_sequencer(intel_dp);
	}

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev_priv, crtc->pipe);

	intel_dp->active_pipe = crtc->pipe;

	if (!intel_dp_is_edp(intel_dp))
		return;

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
		    encoder->base.name);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
}

static void vlv_pre_enable_dp(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	vlv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(state, encoder, pipe_config, conn_state);
}

static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state,
				  struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	vlv_phy_pre_pll_enable(encoder, pipe_config);
}

static void chv_pre_enable_dp(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	chv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(state, encoder, pipe_config, conn_state);

	/* Second common lane will stay alive on its own now */
	chv_phy_release_cl2_override(encoder);
}

static void chv_dp_pre_pll_enable(struct intel_atomic_state *state,
				  struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	chv_phy_pre_pll_enable(encoder, pipe_config);
}

static void chv_dp_post_pll_disable(struct intel_atomic_state *state,
				    struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state)
{
	chv_phy_post_pll_disable(encoder, old_crtc_state);
}

/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information
 */
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
{
	return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
				DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}

static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp)
{
	return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}

static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp)
{
	return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
}

static u8 intel_dp_preemph_max_2(struct intel_dp *intel_dp)
{
	return DP_TRAIN_PRE_EMPH_LEVEL_2;
}

static u8 intel_dp_preemph_max_3(struct intel_dp *intel_dp)
{
	return DP_TRAIN_PRE_EMPH_LEVEL_3;
}

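/*
 * Translate the requested vswing/pre-emphasis combination into VLV PHY
 * de-emphasis, pre-emphasis and unique transition scale values; the
 * actual register programming happens over the DPIO sideband in
 * vlv_set_phy_signal_level().
 */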
static void vlv_set_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	u8 train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return;
		}
		break;
	default:
		return;
	}

	vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
				 uniqtranscale_reg_value, 0);
}

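/*
 * CHV equivalent of the above: translate the vswing/pre-emphasis request
 * into de-emphasis and margin values for chv_set_phy_signal_level().
 */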
static void chv_set_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u32 deemph_reg_value, margin_reg_value;
	bool uniq_trans_scale = false;
	u8 train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			uniq_trans_scale = true;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	default:
		return;
	}

	chv_set_phy_signal_level(encoder, deemph_reg_value,
				 margin_reg_value, uniq_trans_scale);
}

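/* G4x DP voltage swing and pre-emphasis control */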
static u32 g4x_signal_levels(u8 train_set)
{
	u32 signal_levels = 0;

	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
	default:
		signal_levels |= DP_VOLTAGE_0_4;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		signal_levels |= DP_VOLTAGE_0_6;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
		signal_levels |= DP_VOLTAGE_0_8;
		break;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		signal_levels |= DP_VOLTAGE_1_2;
		break;
	}
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
	default:
		signal_levels |= DP_PRE_EMPHASIS_0;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		signal_levels |= DP_PRE_EMPHASIS_3_5;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		signal_levels |= DP_PRE_EMPHASIS_6;
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		signal_levels |= DP_PRE_EMPHASIS_9_5;
		break;
	}
	return signal_levels;
}

static void
g4x_set_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];
	u32 signal_levels;

	signal_levels = g4x_signal_levels(train_set);

	drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
		    signal_levels);

	intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK);
	intel_dp->DP |= signal_levels;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

/* SNB CPU eDP voltage swing and pre-emphasis control */
static u32 snb_cpu_edp_signal_levels(u8 train_set)
{
	u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					DP_TRAIN_PRE_EMPHASIS_MASK);

	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: 0x%x\n",
			      signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}

static void
snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];
	u32 signal_levels;

	signal_levels = snb_cpu_edp_signal_levels(train_set);

	drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
		    signal_levels);

	intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	intel_dp->DP |= signal_levels;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

/* IVB CPU eDP voltage swing and pre-emphasis control */
static u32 ivb_cpu_edp_signal_levels(u8 train_set)
{
	u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					DP_TRAIN_PRE_EMPHASIS_MASK);

	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: 0x%x\n",
			      signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}

static void
ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];
	u32 signal_levels;

	signal_levels = ivb_cpu_edp_signal_levels(train_set);

	drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
		    signal_levels);

	intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	intel_dp->DP |= signal_levels;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

void intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];

	drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
		    train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
		    train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "");
	drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n",
		    (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
		    DP_TRAIN_PRE_EMPHASIS_SHIFT,
		    train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
		    " (max)" : "");
	intel_dp->set_signal_levels(intel_dp);
}

void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       u8 dp_train_pat)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);

	if (dp_train_pat & train_pat_mask)
		drm_dbg_kms(&dev_priv->drm,
			    "Using DP training pattern TPS%d\n",
			    dp_train_pat & train_pat_mask);

	intel_dp->set_link_train(intel_dp, dp_train_pat);
}

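/* Switch the link to the idle pattern, on platforms that provide a hook. */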
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	if (intel_dp->set_idle_link_train)
		intel_dp->set_idle_link_train(intel_dp);
}

static void
intel_dp_link_down(struct intel_encoder *encoder,
		   const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	enum port port = encoder->port;
	u32 DP = intel_dp->DP;

	if (drm_WARN_ON(&dev_priv->drm,
			(intel_de_read(dev_priv, intel_dp->output_reg) &
			 DP_PORT_EN) == 0))
		return;

	drm_dbg_kms(&dev_priv->drm, "\n");

	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
			DP_LINK_TRAIN_PAT_1;
		intel_de_write(dev_priv, intel_dp->output_reg, DP);
		intel_de_posting_read(dev_priv, intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		intel_de_write(dev_priv, intel_dp->output_reg, DP);
		intel_de_posting_read(dev_priv, intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t wakeref;

		with_pps_lock(intel_dp, wakeref)
			intel_dp->active_pipe = INVALID_PIPE;
	}
}

static void
intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 dpcd_ext[6];

	/*
	 * Prior to DP1.3 the bit represented by
	 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
	 * If it is set, DP_DPCD_REV at 0000h could be at a value less than
	 * the true capability of the panel. The only way to check is to
	 * then compare 0000h and 2200h.
	 */
	if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
	      DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
		return;

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
			     &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
		drm_err(&i915->drm,
			"DPCD failed read at extended capabilities\n");
		return;
	}

	if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
		drm_dbg_kms(&i915->drm,
			    "DPCD extended DPCD rev less than base DPCD rev\n");
		return;
	}

	if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
		return;

	drm_dbg_kms(&i915->drm, "Base DPCD: %*ph\n",
		    (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);

	memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
}

bool
intel_dp_read_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
			     sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	intel_dp_extended_receiver_capabilities(intel_dp);

	drm_dbg_kms(&i915->drm, "DPCD: %*ph\n", (int)sizeof(intel_dp->dpcd),
		    intel_dp->dpcd);

	return intel_dp->dpcd[DP_DPCD_REV] != 0;
}

bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
	u8 dprx = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
			      &dprx) != 1)
		return false;
	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}

static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/*
	 * Clear the cached register set to avoid using stale values
	 * for the sinks that do not support DSC.
	 */
	memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

	/* Clear fec_capable to avoid using stale values */
	intel_dp->fec_capable = 0;

	/* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
	    intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
				     intel_dp->dsc_dpcd,
				     sizeof(intel_dp->dsc_dpcd)) < 0)
			drm_err(&i915->drm,
				"Failed to read DPCD register 0x%x\n",
				DP_DSC_SUPPORT);

		drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n",
			    (int)sizeof(intel_dp->dsc_dpcd),
			    intel_dp->dsc_dpcd);

		/* FEC is supported only on DP 1.4 */
		if (!intel_dp_is_edp(intel_dp) &&
		    drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
				      &intel_dp->fec_capable) < 0)
			drm_err(&i915->drm,
				"Failed to read FEC DPCD register\n");

		drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
			    intel_dp->fec_capable);
	}
}

static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
			     sizeof(intel_dp->edp_dpcd))
		drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
			    (int)sizeof(intel_dp->edp_dpcd),
			    intel_dp->edp_dpcd);
	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				 sink_rates, sizeof(sink_rates));

		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
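			/*
			 * Worked example: a raw value of 8100 is
			 * 8100 * 200 kHz = 1.62 GHz on the wire, and is
			 * stored as 8100 * 200 / 10 = 162000, matching the
			 * 162000 used for the RBR port_clock elsewhere in
			 * this file.
			 */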
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}
	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		intel_dp_get_dsc_sink_cap(intel_dp);

	return true;
}

static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	/*
	 * Don't clobber cached eDP rates. Also skip re-reading
	 * the OUI/ID since we know it won't change.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
				 drm_dp_is_branch(intel_dp->dpcd));

		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}
4657

4658
	/*
4659 4660
	 * Some eDP panels do not set a valid value for sink count, that is why
	 * it don't care about read it here and in intel_edp_init_dpcd().
4661
	 */
4662
	if (!intel_dp_is_edp(intel_dp) &&
L
			      DP_DPCD_QUIRK_NO_SINK_COUNT)) {
		u8 count;
		ssize_t r;
		r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
		if (r < 1)
			return false;

		/*
		 * Sink count can change between short pulse hpd hence
		 * a member variable in intel_dp will track any changes
		 * between short pulse interrupts.
		 */
		intel_dp->sink_count = DP_GET_SINK_COUNT(count);

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
		 * a dongle is present but no display. Unless we require to know
		 * if a dongle is present or not, we don't need to update
		 * downstream port information. So, an early return here saves
		 * time from performing other operations which are not required.
		 */
		if (!intel_dp->sink_count)
			return false;
	}
	if (!drm_dp_is_branch(intel_dp->dpcd))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
			     intel_dp->downstream_ports,
			     DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}

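/* Does the sink advertise MST capability in its DPCD? */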
static bool
intel_dp_sink_can_mst(struct intel_dp *intel_dp)
{
	u8 mstm_cap;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
		return false;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
		return false;

	return mstm_cap & DP_MST_CAP;
}

static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	return i915->params.enable_dp_mst &&
		intel_dp->can_mst &&
		intel_dp_sink_can_mst(intel_dp);
}

static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder =
		&dp_to_dig_port(intel_dp)->base;
	bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);

	drm_dbg_kms(&i915->drm,
		    "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
		    encoder->base.base.id, encoder->base.name,
		    yesno(intel_dp->can_mst), yesno(sink_can_mst),
		    yesno(i915->params.enable_dp_mst));

	if (!intel_dp->can_mst)
		return;

	intel_dp->is_mst = sink_can_mst &&
		i915->params.enable_dp_mst;

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
					intel_dp->is_mst);
}

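/*
 * Read the Event Status Indicator registers (DP_SINK_COUNT_ESI onwards)
 * in a single AUX transaction.
 */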
static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
				sink_irq_vector, DP_DPRX_ESI_LEN) ==
		DP_DPRX_ESI_LEN;
}

bool
intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
		       const struct drm_connector_state *conn_state)
{
	/*
	 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
	 * of Color Encoding Format and Content Color Gamut], in order to
	 * send YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP.
	 */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		return true;

	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_SYCC_601:
	case DRM_MODE_COLORIMETRY_OPYCC_601:
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		return true;
	default:
		break;
	}

	return false;
}

static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
				     struct dp_sdp *sdp, size_t size)
{
	size_t length = sizeof(struct dp_sdp);

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	/*
	 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
	 * VSC SDP Header Bytes
	 */
	sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
	sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
	sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
	sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */

	/*
	 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
	 * per DP 1.4a spec.
	 */
	if (vsc->revision != 0x5)
		goto out;

	/* VSC SDP Payload for DB16 through DB18 */
	/* Pixel Encoding and Colorimetry Formats  */
	sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
	sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */

	switch (vsc->bpc) {
	case 6:
		/* 6bpc: 0x0 */
		break;
	case 8:
		sdp->db[17] = 0x1; /* DB17[3:0] */
		break;
	case 10:
		sdp->db[17] = 0x2;
		break;
	case 12:
		sdp->db[17] = 0x3;
		break;
	case 16:
		sdp->db[17] = 0x4;
		break;
	default:
		MISSING_CASE(vsc->bpc);
		break;
	}
	/* Dynamic Range and Component Bit Depth */
	if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
		sdp->db[17] |= 0x80;  /* DB17[7] */

	/* Content Type */
	sdp->db[18] = vsc->content_type & 0x7;

out:
	return length;
}

static ssize_t
intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe,
					 struct dp_sdp *sdp,
					 size_t size)
{
	size_t length = sizeof(struct dp_sdp);
	const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
	unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
	ssize_t len;

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
	if (len < 0) {
		DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
		return -ENOSPC;
	}

	if (len != infoframe_size) {
		DRM_DEBUG_KMS("wrong static hdr metadata size\n");
		return -ENOSPC;
	}

	/*
	 * Set up the infoframe sdp packet for HDR static metadata.
	 * Prepare VSC Header for SU as per DP 1.4a spec,
	 * Table 2-100 and Table 2-101
	 */

	/* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
	sdp->sdp_header.HB0 = 0;
	/*
	 * Packet Type 80h + Non-audio INFOFRAME Type value
	 * HDMI_INFOFRAME_TYPE_DRM: 0x87
	 * - 80h + Non-audio INFOFRAME Type value
	 * - InfoFrame Type: 0x07
	 *    [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
	 */
	sdp->sdp_header.HB1 = drm_infoframe->type;
	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1)
	 * infoframe_size - 1
	 */
	sdp->sdp_header.HB2 = 0x1D;
	/* INFOFRAME SDP Version Number */
	sdp->sdp_header.HB3 = (0x13 << 2);
	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	sdp->db[0] = drm_infoframe->version;
	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	sdp->db[1] = drm_infoframe->length;
	/*
	 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
	 * HDMI_INFOFRAME_HEADER_SIZE
	 */
	BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
	memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
	       HDMI_DRM_INFOFRAME_SIZE);

	/*
	 * Size of DP infoframe sdp packet for HDR static metadata consists of
	 * - DP SDP Header(struct dp_sdp_header): 4 bytes
	 * - Two Data Blocks: 2 bytes
	 *    CTA Header Byte2 (INFOFRAME Version Number)
	 *    CTA Header Byte3 (Length of INFOFRAME)
	 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
	 *
	 * Prior to GEN11's GMP register size is identical to DP HDR static metadata
	 * infoframe size. But GEN11+ has larger than that size, write_infoframe
	 * will pad rest of the size.
	 */
	return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
}

static void intel_write_dp_sdp(struct intel_encoder *encoder,
			       const struct intel_crtc_state *crtc_state,
			       unsigned int type)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct dp_sdp sdp = {};
	ssize_t len;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	switch (type) {
	case DP_SDP_VSC:
		len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
					    sizeof(sdp));
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm,
							       &sdp, sizeof(sdp));
		break;
	default:
		MISSING_CASE(type);
		return;
	}

	if (drm_WARN_ON(&dev_priv->drm, len < 0))
		return;

	dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
}

void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    struct drm_dp_vsc_sdp *vsc)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct dp_sdp sdp = {};
	ssize_t len;

	len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));

	if (drm_WARN_ON(&dev_priv->drm, len < 0))
		return;

	dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
					&sdp, len);
}

void intel_dp_set_infoframes(struct intel_encoder *encoder,
			     bool enable,
			     const struct intel_crtc_state *crtc_state,
			     const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
	u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
			 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
			 VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
	u32 val = intel_de_read(dev_priv, reg);

	/* TODO: Add DSC case (DIP_ENABLE_PPS) */
	/* When PSR is enabled, this routine doesn't disable VSC DIP */
	if (intel_psr_enabled(intel_dp))
		val &= ~dip_enable;
	else
		val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW);

	if (!enable) {
		intel_de_write(dev_priv, reg, val);
		intel_de_posting_read(dev_priv, reg);
		return;
	}

	intel_de_write(dev_priv, reg, val);
	intel_de_posting_read(dev_priv, reg);

	/* When PSR is enabled, VSC SDP is handled by PSR routine */
	if (!intel_psr_enabled(intel_dp))
		intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);

	intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
}

static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
				   const void *buffer, size_t size)
{
	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	memset(vsc, 0, sizeof(*vsc));

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != DP_SDP_VSC)
		return -EINVAL;

	vsc->sdp_type = sdp->sdp_header.HB1;
	vsc->revision = sdp->sdp_header.HB2;
	vsc->length = sdp->sdp_header.HB3;

	if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
	    (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) {
		/*
		 * - HB2 = 0x2, HB3 = 0x8
		 *   VSC SDP supporting 3D stereo + PSR
		 * - HB2 = 0x4, HB3 = 0xe
		 *   VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
		 *   first scan line of the SU region (applies to eDP v1.4b
		 *   and higher).
		 */
		return 0;
	} else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
		/*
		 * - HB2 = 0x5, HB3 = 0x13
		 *   VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry
		 *   Format.
		 */
		vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
		vsc->colorimetry = sdp->db[16] & 0xf;
		vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;

		switch (sdp->db[17] & 0x7) {
		case 0x0:
			vsc->bpc = 6;
			break;
		case 0x1:
			vsc->bpc = 8;
			break;
		case 0x2:
			vsc->bpc = 10;
			break;
		case 0x3:
			vsc->bpc = 12;
			break;
		case 0x4:
			vsc->bpc = 16;
			break;
		default:
			MISSING_CASE(sdp->db[17] & 0x7);
			return -EINVAL;
		}

		vsc->content_type = sdp->db[18] & 0x7;
	} else {
		return -EINVAL;
	}

	return 0;
}

static int
intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
					   const void *buffer, size_t size)
{
	int ret;

	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
		return -EINVAL;

	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1)
	 * 1Dh (i.e., Data Byte Count = 30 bytes).
	 */
	if (sdp->sdp_header.HB2 != 0x1D)
		return -EINVAL;

	/* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. */
	if ((sdp->sdp_header.HB3 & 0x3) != 0)
		return -EINVAL;

	/* INFOFRAME SDP Version Number */
	if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
		return -EINVAL;

	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	if (sdp->db[0] != 1)
		return -EINVAL;

	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
		return -EINVAL;

	ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
					     HDMI_DRM_INFOFRAME_SIZE);

	return ret;
}

static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
				  struct intel_crtc_state *crtc_state,
				  struct drm_dp_vsc_sdp *vsc)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	unsigned int type = DP_SDP_VSC;
	struct dp_sdp sdp = {};
	int ret;

	/* When PSR is enabled, VSC SDP is handled by PSR routine */
	if (intel_psr_enabled(intel_dp))
		return;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));

	ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));

	if (ret)
		drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
}

static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
						     struct intel_crtc_state *crtc_state,
						     struct hdmi_drm_infoframe *drm_infoframe)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
	struct dp_sdp sdp = {};
	int ret;

	if ((crtc_state->infoframes.enable &
	    intel_hdmi_infoframe_enable(type)) == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
				 sizeof(sdp));

	ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
							 sizeof(sdp));

	if (ret)
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to unpack DP HDR Metadata Infoframe SDP\n");
}

void intel_read_dp_sdp(struct intel_encoder *encoder,
		       struct intel_crtc_state *crtc_state,
		       unsigned int type)
{
	if (encoder->type != INTEL_OUTPUT_DDI)
		return;

	switch (type) {
	case DP_SDP_VSC:
		intel_read_dp_vsc_sdp(encoder, crtc_state,
				      &crtc_state->infoframes.vsc);
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
							 &crtc_state->infoframes.drm.drm);
		break;
	default:
		MISSING_CASE(type);
		break;
	}
}

static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int status = 0;
	int test_link_rate;
	u8 test_lane_count, test_link_bw;
	/* DP CTS 1.2, test 4.3.1.11 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);

	/* Validate the requested link rate and lane count */
	if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
					test_lane_count))
		return DP_TEST_NAK;

	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}

static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_pattern;
	u8 test_misc;
	__be16 h_width, v_height;
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "H Width read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "V Height read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
		return DP_TEST_NAK;
	}
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return DP_TEST_ACK;
}

static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
			intel_dp->aux.i2c_defer_count > 0)
			drm_dbg_kms(&i915->drm,
				    "EDID read had %d NACKs, %d DEFERs\n",
				    intel_dp->aux.i2c_nack_count,
				    intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum
		 * of the last block read
		 */
		block += intel_connector->detect_edid->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			drm_dbg_kms(&i915->drm,
				    "Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return test_result;
}

static u8 intel_dp_prepare_phytest(struct intel_dp *intel_dp)
{
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;

	if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
		DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n");
		return DP_TEST_NAK;
	}

	/*
	 * link_mst is set to false to avoid executing mst related code
	 * during compliance testing.
	 */
	intel_dp->link_mst = false;

	return DP_TEST_ACK;
}

static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
			to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_dp_phy_test_params *data =
			&intel_dp->compliance.test_data.phytest;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 pattern_val;

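	/* Program the DDI compliance pattern generator for the requested PHY pattern. */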
	switch (data->phy_pattern) {
	case DP_PHY_TEST_PATTERN_NONE:
		DRM_DEBUG_KMS("Disable Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
		break;
	case DP_PHY_TEST_PATTERN_D10_2:
		DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
		break;
	case DP_PHY_TEST_PATTERN_ERROR_COUNT:
		DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_SCRAMBLED_0);
		break;
	case DP_PHY_TEST_PATTERN_PRBS7:
		DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
		break;
	case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
		/*
		 * FIXME: Ideally the pattern should come from DPCD 0x250.
		 * Since current DPR-100 firmware cannot set it, hardcode it
		 * for the compliance test for now.
		 */
		DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
		pattern_val = 0x3e0f83e0;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
		pattern_val = 0x0f83e0f8;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
		pattern_val = 0x0000f83e;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_CUSTOM80);
		break;
	case DP_PHY_TEST_PATTERN_CP2520:
		/*
		 * FIXME: Ideally the pattern should come from DPCD 0x24A.
		 * Since current DPR-100 firmware cannot set it, hardcode it
		 * for the compliance test for now.
		 */
		DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n");
		pattern_val = 0xFB;
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
			       pattern_val);
		break;
	default:
		WARN(1, "Invalid Phy Test Pattern\n");
	}
}

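/*
 * Quiesce the transcoder, pipe and DP Transport so the PHY compliance
 * pattern can be programmed; intel_dp_autotest_phy_ddi_enable() re-enables
 * them afterwards.
 */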
static void
intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;

	trans_ddi_func_ctl_value = intel_de_read(dev_priv,
						 TRANS_DDI_FUNC_CTL(pipe));
	trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
	dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));

	trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE |
				      TGL_TRANS_DDI_PORT_MASK);
	trans_conf_value &= ~PIPECONF_ENABLE;
	dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE;

	intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
		       trans_ddi_func_ctl_value);
	intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
}

static void
intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, uint8_t lane_cnt)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = dig_port->base.port;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;

	trans_ddi_func_ctl_value = intel_de_read(dev_priv,
						 TRANS_DDI_FUNC_CTL(pipe));
	trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
	dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));

	trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE |
				    TGL_TRANS_DDI_SELECT_PORT(port);
	trans_conf_value |= PIPECONF_ENABLE;
	dp_tp_ctl_value |= DP_TP_CTL_ENABLE;

	intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
	intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
		       trans_ddi_func_ctl_value);
}

void intel_dp_process_phy_request(struct intel_dp *intel_dp)
{
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		DRM_DEBUG_KMS("failed to get link status\n");
		return;
	}

	/* retrieve vswing & pre-emphasis setting */
	intel_dp_get_adjust_train(intel_dp, link_status);

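	/* Quiesce the port, program signal levels and the pattern, then re-enable. */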
	intel_dp_autotest_phy_ddi_disable(intel_dp);

	intel_dp_set_signal_levels(intel_dp);

	intel_dp_phy_pattern_update(intel_dp);

	intel_dp_autotest_phy_ddi_enable(intel_dp, data->num_lanes);

	drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
				    link_status[DP_DPCD_REV]);
}

static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
{
	u8 test_result;

	test_result = intel_dp_prepare_phytest(intel_dp);
	if (test_result != DP_TEST_ACK)
		DRM_ERROR("Phy test preparation failed\n");

	intel_dp_process_phy_request(intel_dp);

	return test_result;
}

static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 response = DP_TEST_NAK;
	u8 request = 0;
	int status;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm,
			    "Could not read test request from sink\n");
		goto update_status;
	}

	switch (request) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n");
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		drm_dbg_kms(&i915->drm, "EDID test requested\n");
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n");
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n",
			    request);
		break;
	}

	if (response & DP_TEST_ACK)
		intel_dp->compliance.test_type = request;

update_status:
	status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
	if (status <= 0)
		drm_dbg_kms(&i915->drm,
			    "Could not write test response to sink\n");
}

/**
 * intel_dp_check_mst_status - service any pending MST interrupts, check link status
 * @intel_dp: Intel DP struct
 *
 * Read any pending MST interrupts, call MST core to handle these and ack the
 * interrupts. Check if the main and AUX link state is ok.
 *
 * Returns:
 * - %true if pending interrupts were serviced (or no interrupts were
 *   pending) w/o detecting an error condition.
 * - %false if an error condition - like AUX failure or a loss of link - is
 *   detected, which needs servicing from the hotplug work.
 */
static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	bool link_ok = true;

	drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);

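	/*
	 * Service ESI events from the sink until none are pending: read the
	 * ESI, check the MST link, let the MST core handle the events, then
	 * ack the serviced events back to the sink.
	 */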
	for (;;) {
		u8 esi[DP_DPRX_ESI_LEN] = {};
		bool handled;
		int retry;

		if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
			drm_dbg_kms(&i915->drm,
				    "failed to get ESI - device may have failed\n");
			link_ok = false;

			break;
		}

		/* check link status - esi[10] = 0x200c */
		if (intel_dp->active_mst_links > 0 && link_ok &&
		    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "channel EQ not ok, retraining\n");
			link_ok = false;
		}

		drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);

		drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
		if (!handled)
			break;

		for (retry = 0; retry < 3; retry++) {
			int wret;

			wret = drm_dp_dpcd_write(&intel_dp->aux,
						 DP_SINK_COUNT_ESI+1,
						 &esi[1], 3);
			if (wret == 3)
				break;
		}
	}

	return link_ok;
}

static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->link_trained)
		return false;

	/*
	 * While PSR source HW is enabled, it controls the main link: it
	 * enables and disables frame transmission itself, so a retrain
	 * attempt may fail because the link is off, or may mix training
	 * patterns with frame data. Besides, when exiting PSR the HW
	 * retrains the link anyway, fixing any link status error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (!intel_dp_get_link_status(intel_dp, link_status))
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return false;

	/* Retrain if Channel EQ or CR not ok */
	return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}

static bool intel_dp_has_connector(struct intel_dp *intel_dp,
				   const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder;
	enum pipe pipe;

	if (!conn_state->best_encoder)
		return false;

	/* SST */
	encoder = &dp_to_dig_port(intel_dp)->base;
	if (conn_state->best_encoder == &encoder->base)
		return true;

	/* MST */
	for_each_pipe(i915, pipe) {
		encoder = &intel_dp->mst_encoders[pipe]->base;
		if (conn_state->best_encoder == &encoder->base)
			return true;
	}

	return false;
}

static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
				      struct drm_modeset_acquire_ctx *ctx,
				      u32 *crtc_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*crtc_mask = 0;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

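	/*
	 * Collect the CRTCs currently driven by this DP encoder (SST, or any
	 * of its MST streams) whose state is stable enough to retrain.
	 */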
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		if (conn_state->commit &&
		    !try_wait_for_completion(&conn_state->commit->hw_done))
			continue;

		*crtc_mask |= drm_crtc_mask(&crtc->base);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (!intel_dp_needs_link_retrain(intel_dp))
		*crtc_mask = 0;

	return ret;
}

static bool intel_dp_is_connected(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	return connector->base.status == connector_status_connected ||
		intel_dp->is_mst;
}

int intel_dp_retrain_link(struct intel_encoder *encoder,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u32 crtc_mask;
	int ret;

	if (!intel_dp_is_connected(intel_dp))
		return 0;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask);
	if (ret)
		return ret;

	if (crtc_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Suppress underruns caused by re-training */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), false);
	}

	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Keep underrun reporting disabled until things are stable */
		intel_wait_for_vblank(dev_priv, crtc->pipe);

		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), true);
	}

	return 0;
}

/*
 * If the display is now connected, check the link status; there have been
 * known issues of link loss triggering a long pulse.
 *
 * Some sinks (e.g. ASUS PB287Q) seem to perform weird HPD ping-pong during
 * modesets: HPD apparently goes low during a modeset and comes back up soon
 * after. Once that happens we must retrain the link to get a picture,
 * in case no userspace component reacted to the intermittent HPD dip.
 */
static enum intel_hotplug_state
intel_dp_hotplug(struct intel_encoder *encoder,
		 struct intel_connector *connector)
{
	struct drm_modeset_acquire_ctx ctx;
	enum intel_hotplug_state state;
	int ret;

	state = intel_encoder_hotplug(encoder, connector);

	drm_modeset_acquire_init(&ctx, 0);

	for (;;) {
		ret = intel_dp_retrain_link(encoder, &ctx);
		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			continue;
		}

		break;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_WARN(encoder->base.dev, ret,
		 "Acquiring modeset locks failed with %i\n", ret);

	/*
	 * Keeping it consistent with intel_ddi_hotplug() and
	 * intel_hdmi_hotplug().
	 */
	if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
		state = INTEL_HOTPLUG_RETRY;

	return state;
}

static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
		return;

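	/* Ack the pending service interrupts by writing the vector back. */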
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	if (val & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_handle_test_request(intel_dp);

	if (val & DP_CP_IRQ)
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

	if (val & DP_SINK_SPECIFIC_IRQ)
		drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}

/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse - handles short pulse interrupts
 * when full detection is not required.
 * Returns %true if short pulse is handled and full detection
 * is NOT required and %false otherwise.
 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	intel_dp_check_service_irq(intel_dp);

	/* Handle CEC interrupts, if any */
	drm_dp_cec_irq(&intel_dp->aux);

	/* defer to the hotplug work for link retraining if needed */
	if (intel_dp_needs_link_retrain(intel_dp))
		return false;

	intel_psr_short_pulse(intel_dp);

	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		drm_dbg_kms(&dev_priv->drm,
			    "Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
	}

	return true;
}

/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
	u8 *dpcd = intel_dp->dpcd;
	u8 type;

	if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
		return connector_status_connected;

	if (lspcon->active)
		lspcon_resume(lspcon);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		return intel_dp->sink_count ?
		connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}

static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	return connector_status_connected;
}

static bool ibx_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];

	return intel_de_read(dev_priv, SDEISR) & bit;
}

static bool g4x_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
		break;
	case HPD_PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
		break;
	case HPD_PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
}

static bool gm45_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case HPD_PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case HPD_PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
		break;
	default:
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
}

static bool ilk_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];

	return intel_de_read(dev_priv, DEISR) & bit;
}

/*
 * intel_digital_port_connected - is the specified port connected?
 * @encoder: intel_encoder
 *
 * In cases where there's a connector physically connected but it can't be used
 * by our hardware we also return false, since the rest of the driver should
 * pretty much treat the port as disconnected. This is relevant for type-C
 * (starting on ICL) where there's ownership involved.
 *
 * Return %true if port is connected, %false otherwise.
 */
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	bool is_connected = false;
	intel_wakeref_t wakeref;

	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		is_connected = dig_port->connected(encoder);

	return is_connected;
}

static struct edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return NULL;

		return drm_edid_duplicate(intel_connector->edid);
	} else
		return drm_get_edid(&intel_connector->base,
				    &intel_dp->aux.ddc);
}

static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct edid *edid;

	intel_dp_unset_edid(intel_dp);
	edid = intel_dp_get_edid(intel_dp);
	intel_connector->detect_edid = edid;

	intel_dp->has_audio = drm_detect_monitor_audio(edid);
	drm_dp_cec_set_edid(&intel_dp->aux, edid);
	intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
}

static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	drm_dp_cec_unset_edid(&intel_dp->aux);
	kfree(intel_connector->detect_edid);
	intel_connector->detect_edid = NULL;

	intel_dp->has_audio = false;
	intel_dp->edid_quirks = 0;
}

static int
intel_dp_detect(struct drm_connector *connector,
		struct drm_modeset_acquire_ctx *ctx,
		bool force)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum drm_connector_status status;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	drm_WARN_ON(&dev_priv->drm,
		    !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	/* Can't disconnect eDP */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(encoder))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
		memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

		if (intel_dp->is_mst) {
			drm_dbg_kms(&dev_priv->drm,
				    "MST device may have disappeared %d vs %d\n",
				    intel_dp->is_mst,
				    intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
	if (INTEL_GEN(dev_priv) >= 11)
		intel_dp_get_dsc_sink_cap(intel_dp);

	intel_dp_configure_mst(intel_dp);

	/*
	 * TODO: Reset link params when switching to MST mode, until MST
	 * supports link training fallback params.
	 */
	if (intel_dp->reset_link_params || intel_dp->is_mst) {
		/* Initial max link lane count */
		intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);

		/* Initial max link rate */
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);

		intel_dp->reset_link_params = false;
	}

	intel_dp_print_rates(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Some external monitors do not signal loss of link synchronization
	 * with an IRQ_HPD, so force a link status check.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		int ret;

		ret = intel_dp_retrain_link(encoder, ctx);
		if (ret)
			return ret;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	if (intel_dp_is_edp(intel_dp) ||
	    to_intel_connector(connector)->detect_edid)
		status = connector_status_connected;

	intel_dp_check_service_irq(intel_dp);

out:
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	/*
	 * Make sure the refs for power wells enabled during detect are
	 * dropped to avoid a new detect cycle triggered by HPD polling.
	 */
	intel_display_power_flush_work(dev_priv);

	return status;
}

static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(dig_port);
	intel_wakeref_t wakeref;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	wakeref = intel_display_power_get(dev_priv, aux_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, aux_domain, wakeref);
}

static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct edid *edid;

	edid = intel_connector->detect_edid;
	if (edid) {
		int ret = intel_connector_update_modes(connector, edid);
		if (ret)
			return ret;
	}

	/* if eDP has no EDID, fall back to fixed mode */
	if (intel_dp_is_edp(intel_attached_dp(to_intel_connector(connector))) &&
	    intel_connector->panel.fixed_mode) {
		struct drm_display_mode *mode;

		mode = drm_mode_duplicate(connector->dev,
					  intel_connector->panel.fixed_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}

	return 0;
}

static int
intel_dp_connector_register(struct drm_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	int ret;

	ret = intel_connector_register(connector);
	if (ret)
		return ret;

	drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
		    intel_dp->aux.name, connector->kdev->kobj.name);

	intel_dp->aux.dev = connector->kdev;
	ret = drm_dp_aux_register(&intel_dp->aux);
	if (!ret)
		drm_dp_cec_register_connector(&intel_dp->aux, connector);
	return ret;
}

static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));

	drm_dp_cec_unregister_connector(&intel_dp->aux);
	drm_dp_aux_unregister(&intel_dp->aux);
	intel_connector_unregister(connector);
}

void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
	struct intel_dp *intel_dp = &dig_port->dp;

	intel_dp_mst_encoder_cleanup(dig_port);
	if (intel_dp_is_edp(intel_dp)) {
		intel_wakeref_t wakeref;

		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		with_pps_lock(intel_dp, wakeref)
			edp_panel_vdd_off_sync(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}

	intel_dp_aux_fini(intel_dp);
}

static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	intel_dp_encoder_flush_work(encoder);

	drm_encoder_cleanup(encoder);
	kfree(enc_to_dig_port(to_intel_encoder(encoder)));
}

void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);
}

static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
{
	long ret;

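	/* C: a CP_IRQ has arrived since cp_irq_count was last cached. */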
#define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
	ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
					       msecs_to_jiffies(timeout));

	if (!ret)
		DRM_DEBUG_KMS("Timed out waiting for CP_IRQ\n");
}

static
int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
				u8 *an)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&dig_port->base.base));
	static const struct drm_dp_aux_msg msg = {
		.request = DP_AUX_NATIVE_WRITE,
		.address = DP_AUX_HDCP_AKSV,
		.size = DRM_HDCP_KSV_LEN,
	};
	u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
	ssize_t dpcd_ret;
	int ret;

	/* Output An first, that's easy */
	dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AN,
				     an, DRM_HDCP_AN_LEN);
	if (dpcd_ret != DRM_HDCP_AN_LEN) {
		drm_dbg_kms(&i915->drm,
			    "Failed to write An over DP/AUX (%zd)\n",
			    dpcd_ret);
		return dpcd_ret >= 0 ? -EIO : dpcd_ret;
	}

	/*
	 * Since Aksv is Oh-So-Secret, we can't access it in software. So in
	 * order to get it on the wire, we need to create the AUX header as if
	 * we were writing the data, and then tickle the hardware to output the
	 * data once the header is sent out.
	 */
	intel_dp_aux_header(txbuf, &msg);

	ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size,
				rxbuf, sizeof(rxbuf),
				DP_AUX_CH_CTL_AUX_AKSV_SELECT);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm,
			    "Write Aksv over DP/AUX failed (%d)\n", ret);
		return ret;
	} else if (ret == 0) {
		drm_dbg_kms(&i915->drm, "Aksv write over DP/AUX was empty\n");
		return -EIO;
	}

	reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
	if (reply != DP_AUX_NATIVE_REPLY_ACK) {
		drm_dbg_kms(&i915->drm,
			    "Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
			    reply);
		return -EIO;
	}
	return 0;
}

static int intel_dp_hdcp_read_bksv(struct intel_digital_port *dig_port,
				   u8 *bksv)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	ssize_t ret;

	ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
			       DRM_HDCP_KSV_LEN);
	if (ret != DRM_HDCP_KSV_LEN) {
		drm_dbg_kms(&i915->drm,
			    "Read Bksv from DP/AUX failed (%zd)\n", ret);
		return ret >= 0 ? -EIO : ret;
	}
	return 0;
}

static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port,
				      u8 *bstatus)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	ssize_t ret;

	/*
	 * For some reason the HDMI and DP HDCP specs call this register
	 * definition by different names. In the HDMI spec, it's called BSTATUS,
	 * but in DP it's called BINFO.
	 */
	ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BINFO,
			       bstatus, DRM_HDCP_BSTATUS_LEN);
	if (ret != DRM_HDCP_BSTATUS_LEN) {
		drm_dbg_kms(&i915->drm,
			    "Read bstatus from DP/AUX failed (%zd)\n", ret);
		return ret >= 0 ? -EIO : ret;
	}
	return 0;
}

static
int intel_dp_hdcp_read_bcaps(struct intel_digital_port *dig_port,
			     u8 *bcaps)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	ssize_t ret;

	ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
			       bcaps, 1);
	if (ret != 1) {
		drm_dbg_kms(&i915->drm,
			    "Read bcaps from DP/AUX failed (%zd)\n", ret);
		return ret >= 0 ? -EIO : ret;
	}

	return 0;
}

static
int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port,
				   bool *repeater_present)
{
	ssize_t ret;
	u8 bcaps;

	ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
	if (ret)
		return ret;

	*repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
	return 0;
}

static
int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *dig_port,
				u8 *ri_prime)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	ssize_t ret;

	ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
			       ri_prime, DRM_HDCP_RI_LEN);
	if (ret != DRM_HDCP_RI_LEN) {
		drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n",
			    ret);
		return ret >= 0 ? -EIO : ret;
	}
	return 0;
}

static
int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *dig_port,
				 bool *ksv_ready)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	ssize_t ret;
	u8 bstatus;

	ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
			       &bstatus, 1);
	if (ret != 1) {
		drm_dbg_kms(&i915->drm,
			    "Read bstatus from DP/AUX failed (%zd)\n", ret);
		return ret >= 0 ? -EIO : ret;
	}
	*ksv_ready = bstatus & DP_BSTATUS_READY;
	return 0;
}

static
int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port,
				int num_downstream, u8 *ksv_fifo)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	ssize_t ret;
	int i;

	/* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
	for (i = 0; i < num_downstream; i += 3) {
		size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
		ret = drm_dp_dpcd_read(&dig_port->dp.aux,
				       DP_AUX_HDCP_KSV_FIFO,
				       ksv_fifo + i * DRM_HDCP_KSV_LEN,
				       len);
		if (ret != len) {
			drm_dbg_kms(&i915->drm,
				    "Read ksv[%d] from DP/AUX failed (%zd)\n",
				    i, ret);
			return ret >= 0 ? -EIO : ret;
		}
	}
	return 0;
}

static
int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
				    int i, u32 *part)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	ssize_t ret;

	if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
		return -EINVAL;

	ret = drm_dp_dpcd_read(&dig_port->dp.aux,
			       DP_AUX_HDCP_V_PRIME(i), part,
			       DRM_HDCP_V_PRIME_PART_LEN);
	if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
		drm_dbg_kms(&i915->drm,
			    "Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
		return ret >= 0 ? -EIO : ret;
	}
	return 0;
}

static
int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
				    bool enable)
{
	/* Not used for single stream DisplayPort setups */
	return 0;
}

static
bool intel_dp_hdcp_check_link(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	ssize_t ret;
	u8 bstatus;

	ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
			       &bstatus, 1);
	if (ret != 1) {
		drm_dbg_kms(&i915->drm,
			    "Read bstatus from DP/AUX failed (%zd)\n", ret);
		return false;
	}

	return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
}

static
int intel_dp_hdcp_capable(struct intel_digital_port *dig_port,
			  bool *hdcp_capable)
{
	ssize_t ret;
	u8 bcaps;

	ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
	if (ret)
		return ret;

	*hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
	return 0;
}

struct hdcp2_dp_errata_stream_type {
	u8	msg_id;
	u8	stream_type;
} __packed;

struct hdcp2_dp_msg_data {
	u8 msg_id;
	u32 offset;
	bool msg_detectable;
	u32 timeout;
	u32 timeout2; /* Added for non_paired situation */
};

static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
	{ HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 },
	{ HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
	  false, HDCP_2_2_CERT_TIMEOUT_MS, 0 },
	{ HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
	  false, 0, 0 },
	{ HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
	  false, 0, 0 },
	{ HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
	  true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
	  HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS },
	{ HDCP_2_2_AKE_SEND_PAIRING_INFO,
	  DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
	  HDCP_2_2_PAIRING_TIMEOUT_MS, 0 },
	{ HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 },
	{ HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
	  false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 },
	{ HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
	  0, 0 },
	{ HDCP_2_2_REP_SEND_RECVID_LIST,
	  DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
	  HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 },
	{ HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
	  0, 0 },
	{ HDCP_2_2_REP_STREAM_MANAGE,
	  DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
	  0, 0 },
	{ HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
	  false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 },
/* local define to shovel this through the write_2_2 interface */
#define HDCP_2_2_ERRATA_DP_STREAM_TYPE	50
	{ HDCP_2_2_ERRATA_DP_STREAM_TYPE,
	  DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
	  0, 0 },
};

static int
intel_dp_hdcp2_read_rx_status(struct intel_digital_port *dig_port,
			      u8 *rx_status)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	ssize_t ret;

	ret = drm_dp_dpcd_read(&dig_port->dp.aux,
			       DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
			       HDCP_2_2_DP_RXSTATUS_LEN);
	if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
		drm_dbg_kms(&i915->drm,
			    "Read bstatus from DP/AUX failed (%zd)\n", ret);
		return ret >= 0 ? -EIO : ret;
	}

	return 0;
}

static
int hdcp2_detect_msg_availability(struct intel_digital_port *dig_port,
				  u8 msg_id, bool *msg_ready)
{
	u8 rx_status;
	int ret;

	*msg_ready = false;
	ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status);
	if (ret < 0)
		return ret;

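	/* Only H', pairing info and receiver ID list readiness are flagged in RXSTATUS. */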
	switch (msg_id) {
	case HDCP_2_2_AKE_SEND_HPRIME:
		if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
			*msg_ready = true;
		break;
	case HDCP_2_2_AKE_SEND_PAIRING_INFO:
		if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
			*msg_ready = true;
		break;
	case HDCP_2_2_REP_SEND_RECVID_LIST:
		if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
			*msg_ready = true;
		break;
	default:
		DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
		return -EINVAL;
	}

	return 0;
}

static ssize_t
intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *dig_port,
			    const struct hdcp2_dp_msg_data *hdcp2_msg_data)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_dp *dp = &dig_port->dp;
	struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
	u8 msg_id = hdcp2_msg_data->msg_id;
	int ret, timeout;
	bool msg_ready = false;

	if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
		timeout = hdcp2_msg_data->timeout2;
	else
		timeout = hdcp2_msg_data->timeout;

	/*
	 * There is no way to detect the CERT, LPRIME and STREAM_READY
	 * availability. So Wait for timeout and read the msg.
	 */
	if (!hdcp2_msg_data->msg_detectable) {
		mdelay(timeout);
		ret = 0;
	} else {
6771 6772 6773 6774 6775
		/*
		 * As we want to check the msg availability at timeout, Ignoring
		 * the timeout at wait for CP_IRQ.
		 */
		intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
6776
		ret = hdcp2_detect_msg_availability(dig_port,
6777
						    msg_id, &msg_ready);
6778 6779 6780 6781 6782
		if (!msg_ready)
			ret = -ETIMEDOUT;
	}

	if (ret)
6783 6784 6785
		drm_dbg_kms(&i915->drm,
			    "msg_id %d, ret %d, timeout(mSec): %d\n",
			    hdcp2_msg_data->msg_id, ret, timeout);
6786 6787 6788 6789

	return ret;
}

static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++)
		if (hdcp2_dp_msg_data[i].msg_id == msg_id)
			return &hdcp2_dp_msg_data[i];

	return NULL;
}

static
int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port,
			     void *buf, size_t size)
{
	struct intel_dp *dp = &dig_port->dp;
	struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
	unsigned int offset;
	u8 *byte = buf;
	ssize_t ret, bytes_to_write, len;
	const struct hdcp2_dp_msg_data *hdcp2_msg_data;

	hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
	if (!hdcp2_msg_data)
		return -EINVAL;

	offset = hdcp2_msg_data->offset;

	/* No msg_id in DP HDCP2.2 msgs */
	bytes_to_write = size - 1;
	byte++;

	hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);

	while (bytes_to_write) {
		len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
				DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;

		ret = drm_dp_dpcd_write(&dig_port->dp.aux,
					offset, (void *)byte, len);
		if (ret < 0)
			return ret;

		bytes_to_write -= ret;
		byte += ret;
		offset += ret;
	}

	return size;
}
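
/*
 * Illustrative note (the payload numbers are hypothetical, only
 * DP_AUX_MAX_PAYLOAD_BYTES is real): native AUX transfers carry at most
 * 16 bytes, so a message with 128 payload bytes left after the msg_id
 * byte is stripped above would go out as 128 / 16 = 8 AUX transactions,
 * with offset and byte advanced by the short-write length that
 * drm_dp_dpcd_write() returns on each iteration.
 */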

static
ssize_t get_receiver_id_list_size(struct intel_digital_port *dig_port)
{
	u8 rx_info[HDCP_2_2_RXINFO_LEN];
	u32 dev_cnt;
	ssize_t ret;

	ret = drm_dp_dpcd_read(&dig_port->dp.aux,
			       DP_HDCP_2_2_REG_RXINFO_OFFSET,
			       (void *)rx_info, HDCP_2_2_RXINFO_LEN);
	if (ret != HDCP_2_2_RXINFO_LEN)
		return ret >= 0 ? -EIO : ret;

	dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		   HDCP_2_2_DEV_COUNT_LO(rx_info[1]));

	if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
		dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;

	ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
		HDCP_2_2_RECEIVER_IDS_MAX_LEN +
		(dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);

	return ret;
}
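
/*
 * Worked example (illustrative only): hdcp2_rep_send_receiverid_list is
 * sized for the maximum-length receiver ID list, so the computation above
 * trims the unused tail; with dev_cnt == 2 the returned size covers the
 * fixed message fields plus just 2 * HDCP_2_2_RECEIVER_ID_LEN bytes of
 * actual IDs.
 */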

static
int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port,
			    u8 msg_id, void *buf, size_t size)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	unsigned int offset;
	u8 *byte = buf;
	ssize_t ret, bytes_to_recv, len;
	const struct hdcp2_dp_msg_data *hdcp2_msg_data;

	hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
	if (!hdcp2_msg_data)
		return -EINVAL;
	offset = hdcp2_msg_data->offset;

	ret = intel_dp_hdcp2_wait_for_msg(dig_port, hdcp2_msg_data);
	if (ret < 0)
		return ret;

	if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
		ret = get_receiver_id_list_size(dig_port);
		if (ret < 0)
			return ret;

		size = ret;
	}
	bytes_to_recv = size - 1;

	/* DP adaptation msgs have no msg_id */
	byte++;

	while (bytes_to_recv) {
		len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
		      DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;

		ret = drm_dp_dpcd_read(&dig_port->dp.aux, offset,
				       (void *)byte, len);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n",
				    msg_id, ret);
			return ret;
		}

		bytes_to_recv -= ret;
		byte += ret;
		offset += ret;
	}
	byte = buf;
	*byte = msg_id;

	return size;
}

static
int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *dig_port,
				      bool is_repeater, u8 content_type)
{
	int ret;
	struct hdcp2_dp_errata_stream_type stream_type_msg;

	if (is_repeater)
		return 0;

	/*
	 * Errata for DP: since the stream type is used for encryption,
	 * the receiver must be told the stream type so that it can
	 * decrypt the content.
	 * A repeater is told the stream type as part of its
	 * authentication later in time.
	 */
	stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
	stream_type_msg.stream_type = content_type;

	ret = intel_dp_hdcp2_write_msg(dig_port, &stream_type_msg,
				       sizeof(stream_type_msg));

	return ret < 0 ? ret : 0;
}

static
int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port)
{
	u8 rx_status;
	int ret;

	ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status);
	if (ret)
		return ret;

	if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
		ret = HDCP_REAUTH_REQUEST;
	else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
		ret = HDCP_LINK_INTEGRITY_FAILURE;
	else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
		ret = HDCP_TOPOLOGY_CHANGE;

	return ret;
}

static
int intel_dp_hdcp2_capable(struct intel_digital_port *dig_port,
			   bool *capable)
{
	u8 rx_caps[3];
	int ret;

	*capable = false;
	ret = drm_dp_dpcd_read(&dig_port->dp.aux,
			       DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
			       rx_caps, HDCP_2_2_RXCAPS_LEN);
	if (ret != HDCP_2_2_RXCAPS_LEN)
		return ret >= 0 ? -EIO : ret;

	if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
	    HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
		*capable = true;

	return 0;
}

static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
	.write_an_aksv = intel_dp_hdcp_write_an_aksv,
	.read_bksv = intel_dp_hdcp_read_bksv,
	.read_bstatus = intel_dp_hdcp_read_bstatus,
	.repeater_present = intel_dp_hdcp_repeater_present,
	.read_ri_prime = intel_dp_hdcp_read_ri_prime,
	.read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
	.read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
	.read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
	.toggle_signalling = intel_dp_hdcp_toggle_signalling,
	.check_link = intel_dp_hdcp_check_link,
	.hdcp_capable = intel_dp_hdcp_capable,
	.write_2_2_msg = intel_dp_hdcp2_write_msg,
	.read_2_2_msg = intel_dp_hdcp2_read_msg,
	.config_stream_type = intel_dp_hdcp2_config_stream_type,
	.check_2_2_link = intel_dp_hdcp2_check_link,
	.hdcp_2_2_capable = intel_dp_hdcp2_capable,
	.protocol = HDCP_PROTOCOL_DP,
};

static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	drm_dbg_kms(&dev_priv->drm,
		    "VDD left on by BIOS, adjusting state tracking\n");
	intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));

	edp_panel_vdd_schedule_off(intel_dp);
}

static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum pipe pipe;

	if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
				  encoder->port, &pipe))
		return pipe;

	return INVALID_PIPE;
}

void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
	intel_wakeref_t wakeref;

	if (!HAS_DDI(dev_priv))
		intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);

	if (lspcon->active)
		lspcon_resume(lspcon);

	intel_dp->reset_link_params = true;

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			intel_dp->active_pipe = vlv_active_pipe(intel_dp);

		if (intel_dp_is_edp(intel_dp)) {
			/*
			 * Reinit the power sequencer, in case BIOS did
			 * something nasty with it.
			 */
			intel_dp_pps_init(intel_dp);
			intel_edp_panel_vdd_sanitize(intel_dp);
		}
	}
}

static int intel_modeset_tile_group(struct intel_atomic_state *state,
				    int tile_group_id)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	int ret = 0;

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!connector->has_tile ||
		    connector->tile_group->id != tile_group_id)
			continue;

		conn_state = drm_atomic_get_connector_state(&state->base,
							    connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			break;
		}

		crtc = to_intel_crtc(conn_state->crtc);

		if (!crtc)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	if (transcoders == 0)
		return 0;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.enable)
			continue;

		if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
			continue;

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			return ret;

		transcoders &= ~BIT(crtc_state->cpu_transcoder);
	}

	drm_WARN_ON(&dev_priv->drm, transcoders != 0);

	return 0;
}

static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
				      struct drm_connector *connector)
{
	const struct drm_connector_state *old_conn_state =
		drm_atomic_get_old_connector_state(&state->base, connector);
	const struct intel_crtc_state *old_crtc_state;
	struct intel_crtc *crtc;
	u8 transcoders;

	crtc = to_intel_crtc(old_conn_state->crtc);
	if (!crtc)
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!old_crtc_state->hw.active)
		return 0;

	transcoders = old_crtc_state->sync_mode_slaves_mask;
	if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
		transcoders |= BIT(old_crtc_state->master_transcoder);

	return intel_modeset_affected_transcoders(state,
						  transcoders);
}

static int intel_dp_connector_atomic_check(struct drm_connector *conn,
					   struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(conn->dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	int ret;

	ret = intel_digital_connector_atomic_check(conn, &state->base);
	if (ret)
		return ret;

	/*
	 * We don't enable port sync on BDW due to missing w/as and
	 * due to not having adjusted the modeset sequence appropriately.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 0;

	if (!intel_connector_needs_modeset(state, conn))
		return 0;

	if (conn->has_tile) {
		ret = intel_modeset_tile_group(state, conn->tile_group->id);
		if (ret)
			return ret;
	}

	return intel_modeset_synced_crtcs(state, conn);
}

static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};

static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_dp_connector_atomic_check,
};

static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};

static bool intel_edp_have_power(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;
	bool have_power = false;

	with_pps_lock(intel_dp, wakeref) {
		have_power = edp_have_panel_power(intel_dp) &&
						  edp_have_panel_vdd(intel_dp);
	}

	return have_power;
}

enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_dp *intel_dp = &dig_port->dp;

	if (dig_port->base.type == INTEL_OUTPUT_EDP &&
	    (long_hpd || !intel_edp_have_power(intel_dp))) {
		/*
		 * vdd off can generate a long/short pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		drm_dbg_kms(&i915->drm,
			    "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
			    long_hpd ? "long" : "short",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return IRQ_HANDLED;
	}

	drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    long_hpd ? "long" : "short");

	if (long_hpd) {
		intel_dp->reset_link_params = true;
		return IRQ_NONE;
	}

	if (intel_dp->is_mst) {
		if (!intel_dp_check_mst_status(intel_dp))
			return IRQ_NONE;
	} else if (!intel_dp_short_pulse(intel_dp)) {
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

/* check the VBT to see whether the eDP is on another port */
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
{
	/*
	 * eDP is not supported on g4x, so bail out early just
	 * for a bit of extra safety in case the VBT is bonkers.
	 */
	if (INTEL_GEN(dev_priv) < 5)
		return false;

	if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
		return true;

	return intel_bios_is_port_edp(dev_priv, port);
}

static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	if (!IS_G4X(dev_priv) && port != PORT_A)
		intel_attach_force_audio_property(connector);

	intel_attach_broadcast_rgb_property(connector);
	if (HAS_GMCH(dev_priv))
		drm_connector_attach_max_bpc_property(connector, 6, 10);
	else if (INTEL_GEN(dev_priv) >= 5)
		drm_connector_attach_max_bpc_property(connector, 6, 12);

	intel_attach_colorspace_property(connector);

	if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11)
		drm_object_attach_property(&connector->base,
					   connector->dev->mode_config.hdr_output_metadata_property,
					   0);

	if (intel_dp_is_edp(intel_dp)) {
		u32 allowed_scalers;

		allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
		if (!HAS_GMCH(dev_priv))
			allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);

		drm_connector_attach_scaling_mode_property(connector, allowed_scalers);

		connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
	}
}

static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->panel_power_off_time = ktime_get_boottime();
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}

static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, pp_ctl;
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	pp_ctl = ilk_get_pp_control(intel_dp);

	/* Ensure PPS is unlocked */
	if (!HAS_DDI(dev_priv))
		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);

	pp_on = intel_de_read(dev_priv, regs.pp_on);
	pp_off = intel_de_read(dev_priv, regs.pp_off);

	/* Pull timing values out of registers */
	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);

	if (i915_mmio_reg_valid(regs.pp_div)) {
		u32 pp_div;

		pp_div = intel_de_read(dev_priv, regs.pp_div);

		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
	} else {
		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
	}
}

static void
intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
{
	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      state_name,
		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}

static void
intel_pps_verify_state(struct intel_dp *intel_dp)
{
	struct edp_power_seq hw;
	struct edp_power_seq *sw = &intel_dp->pps_delays;

	intel_pps_readout_hw_state(intel_dp, &hw);

	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
		DRM_ERROR("PPS state mismatch\n");
		intel_pps_dump_state("sw", sw);
		intel_pps_dump_state("hw", &hw);
	}
}

static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	intel_pps_readout_hw_state(intel_dp, &cur);

	intel_pps_dump_state("cur", &cur);

	vbt = dev_priv->vbt.edp.pps;
	/* On Toshiba Satellite P50-C-18C system the VBT T12 delay
	 * of 500ms appears to be too short. Occasionally the panel
	 * just fails to power back on. Increasing the delay (we clamp
	 * it to at least 1300ms here) seems sufficient to avoid this
	 * problem.
	 */
	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
		drm_dbg_kms(&dev_priv->drm,
			    "Increasing T12 panel delay as per the quirk to %d\n",
			    vbt.t11_t12);
	}
	/* T11_T12 is special: the hw stores it in units of 100ms and it is
	 * zero based (so we need to add 100 ms), while the sw vbt table has
	 * already multiplied the value by 1000 so it is in units of 100usec
	 * here, too. */
	vbt.t11_t12 += 100 * 10;
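
	/*
	 * Worked example (illustrative values): a VBT T11_T12 of 500ms is
	 * stored as 5000 in the sw table (units of 100usec); the line above
	 * then adds 100 * 10 = 1000 for the zero-based hw encoding, giving
	 * 6000, i.e. an effective 600ms power cycle delay.
	 */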

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	intel_pps_dump_state("vbt", &vbt);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay
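
	/*
	 * For illustration (hypothetical values): with final->t1_t3 == 2100,
	 * i.e. 210ms in the hw's 100usec units, get_delay() above yields
	 * DIV_ROUND_UP(2100, 10) = 210, so the intel_dp delays end up in ms.
	 */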

	drm_dbg_kms(&dev_priv->drm,
		    "panel power up delay %d, power down delay %d, power cycle delay %d\n",
		    intel_dp->panel_power_up_delay,
		    intel_dp->panel_power_down_delay,
		    intel_dp->panel_power_cycle_delay);

	drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
		    intel_dp->backlight_on_delay,
		    intel_dp->backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;

	/*
	 * HW has only a 100msec granularity for t11_t12 so round it up
	 * accordingly.
	 */
	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
}

static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, port_sel = 0;
	int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
	struct pps_registers regs;
	enum port port = dp_to_dig_port(intel_dp)->base.port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_get_registers(intel_dp, &regs);

	/*
	 * On some VLV machines the BIOS can leave the VDD
	 * enabled even on power sequencers which aren't
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * edp_panel_vdd_on() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
	 * soon as the new power sequencer gets initialized.
	 */
	if (force_disable_vdd) {
		u32 pp = ilk_get_pp_control(intel_dp);

		drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
			 "Panel power already on\n");

		if (pp & EDP_FORCE_VDD)
			drm_dbg_kms(&dev_priv->drm,
				    "VDD already on, disabling first\n");

		pp &= ~EDP_FORCE_VDD;

		intel_de_write(dev_priv, regs.pp_ctrl, pp);
	}

	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
		REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		switch (port) {
		case PORT_A:
			port_sel = PANEL_PORT_SELECT_DPA;
			break;
		case PORT_C:
			port_sel = PANEL_PORT_SELECT_DPC;
			break;
		case PORT_D:
			port_sel = PANEL_PORT_SELECT_DPD;
			break;
		default:
			MISSING_CASE(port);
			break;
		}
	}

	pp_on |= port_sel;

	intel_de_write(dev_priv, regs.pp_on, pp_on);
	intel_de_write(dev_priv, regs.pp_off, pp_off);

	/*
	 * Compute the divisor for the pp clock, simply match the Bspec formula.
	 */
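	/*
	 * For illustration (hypothetical clock): with a 24 MHz raw clock,
	 * div = 24000 / 1000 = 24, so the reference divider field written
	 * below works out to (100 * 24) / 2 - 1 = 1199.
	 */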
	if (i915_mmio_reg_valid(regs.pp_div)) {
		intel_de_write(dev_priv, regs.pp_div,
			       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
	} else {
		u32 pp_ctl;

		pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		    intel_de_read(dev_priv, regs.pp_on),
		    intel_de_read(dev_priv, regs.pp_off),
		    i915_mmio_reg_valid(regs.pp_div) ?
		    intel_de_read(dev_priv, regs.pp_div) :
		    (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}

static void intel_dp_pps_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_initial_power_sequencer_setup(intel_dp);
	} else {
		intel_dp_init_panel_power_sequencer(intel_dp);
		intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
	}
}

/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev_priv: i915 device
 * @crtc_state: a pointer to the active intel_crtc_state
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to hold dev_priv->drrs.mutex.
 */
static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state,
				    int refresh_rate)
{
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Refresh rate should be a positive non-zero value\n");
		return;
	}

	if (intel_dp == NULL) {
		drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
		return;
	}

	if (!intel_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS: intel_crtc not initialized\n");
		return;
	}

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
		return;
	}

	if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!crtc_state->hw.active) {
		drm_dbg_kms(&dev_priv->drm,
			    "eDP encoder disabled. CRTC not Active\n");
		return;
	}

	if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(crtc_state, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(crtc_state, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			drm_err(&dev_priv->drm,
				"Unsupported refreshrate type\n");
		}
	} else if (INTEL_GEN(dev_priv) > 6) {
		i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		intel_de_write(dev_priv, reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n",
		    refresh_rate);
}

/**
 * intel_edp_drrs_enable - init drrs struct if supported
 * @intel_dp: DP struct
 * @crtc_state: A pointer to the active crtc state.
 *
 * Initializes frontbuffer_bits and drrs.dp
 */
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!crtc_state->has_drrs) {
		drm_dbg_kms(&dev_priv->drm, "Panel doesn't support DRRS\n");
		return;
	}

	if (dev_priv->psr.enabled) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR enabled. Not enabling DRRS.\n");
		return;
	}

	mutex_lock(&dev_priv->drrs.mutex);
	if (dev_priv->drrs.dp) {
		drm_dbg_kms(&dev_priv->drm, "DRRS already enabled\n");
		goto unlock;
	}

	dev_priv->drrs.busy_frontbuffer_bits = 0;

	dev_priv->drrs.dp = intel_dp;

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 * @old_crtc_state: Pointer to old crtc_state.
 *
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, old_crtc_state,
			drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	cancel_delayed_work_sync(&dev_priv->drrs.work);
}

static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;

		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
			drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode));
	}

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes starts.
 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
			       unsigned int frontbuffer_bits)
{
	struct intel_dp *intel_dp;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;
	if (!intel_dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));

	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or flip on a crtc is completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits)
{
	struct intel_dp *intel_dp;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;
	if (!intel_dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
					drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}

/**
 * DOC: Display Refresh Rate Switching (DRRS)
 *
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
 * dynamically, based on the usage scenario. This feature is applicable
 * for internal panels.
 *
 * Indication that the panel supports DRRS is given by the panel EDID, which
 * would list multiple refresh rates for one resolution.
 *
 * DRRS is of 2 types - static and seamless.
 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
 * (may appear as a blink on screen) and is used in dock-undock scenario.
 * Seamless DRRS involves changing RR without any visual effect to the user
 * and can be used during normal system usage. This is done by programming
 * certain registers.
 *
 * Support for static/seamless DRRS may be indicated in the VBT based on
 * inputs from the panel spec.
 *
 * DRRS saves power by switching to low RR based on usage scenarios.
 *
 * The implementation is based on frontbuffer tracking implementation.  When
 * there is a disturbance on the screen triggered by user activity or a periodic
 * system activity, DRRS is disabled (RR is changed to high RR).  When there is
 * no movement on screen, after a timeout of 1 second, a switch to low RR is
 * made.
 *
 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
 * and intel_edp_drrs_flush() are called.
 *
 * DRRS can be further extended to support other internal panels and also
 * the scenario of video playback wherein RR is set based on the rate
 * requested by userspace.
 */
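
/*
 * A minimal usage sketch (illustrative only, not a literal call site in
 * this file): frontbuffer tracking code is expected to bracket screen
 * updates roughly like so, with frontbuffer_bits already computed for
 * the planes being touched:
 *
 *	intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
 *	... render or flip ...
 *	intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
 *
 * invalidate() forces the high refresh rate immediately, while flush()
 * re-arms the 1 second idleness timer that switches back to the low
 * refresh rate.
 */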

/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *connector,
		   struct drm_display_mode *fixed_mode)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct drm_display_mode *downclock_mode = NULL;

	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	if (INTEL_GEN(dev_priv) <= 6) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
		return NULL;
	}

	downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
	if (!downclock_mode) {
		drm_dbg_kms(&dev_priv->drm,
			    "Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	drm_dbg_kms(&dev_priv->drm,
		    "seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}

static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	enum pipe pipe = INVALID_PIPE;
	intel_wakeref_t wakeref;
	struct edid *edid;

	if (!intel_dp_is_edp(intel_dp))
		return true;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		drm_WARN_ON(dev,
			    !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		drm_info(&dev_priv->drm,
			 "LVDS was detected, not registering eDP\n");

		return false;
	}

	with_pps_lock(intel_dp, wakeref) {
		intel_dp_init_panel_power_timestamps(intel_dp);
		intel_dp_pps_init(intel_dp);
		intel_edp_panel_vdd_sanitize(intel_dp);
	}

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		drm_info(&dev_priv->drm,
			 "failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_connector_update_edid_property(connector, edid);
			intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
	if (fixed_mode)
		downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);

	/* fallback to VBT if available for eDP */
	if (!fixed_mode)
		fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		drm_dbg_kms(&dev_priv->drm,
			    "using pipe %c for initial backlight setup\n",
			    pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	if (fixed_mode) {
		drm_connector_set_panel_orientation_with_quirk(connector,
				dev_priv->vbt.orientation,
				fixed_mode->hdisplay, fixed_mode->vdisplay);
	}

	return true;

out_vdd_off:
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);

	return false;
}

static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
	struct intel_connector *intel_connector;
	struct drm_connector *connector;

	intel_connector = container_of(work, typeof(*intel_connector),
				       modeset_retry_work);
	connector = &intel_connector->base;
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
		      connector->name);

	/* Grab the locks before changing connector property*/
	mutex_lock(&connector->dev->mode_config.mutex);
	/* Set connector link status to BAD and send a Uevent to notify
	 * userspace to do a modeset.
	 */
	drm_connector_set_link_status_property(connector,
					       DRM_MODE_LINK_STATUS_BAD);
	mutex_unlock(&connector->dev->mode_config.mutex);
	/* Send Hotplug uevent so userspace can reprobe */
	drm_kms_helper_hotplug_event(connector->dev);
}

bool
intel_dp_init_connector(struct intel_digital_port *dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	enum phy phy = intel_port_to_phy(dev_priv, port);
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	if (drm_WARN(dev, dig_port->max_lanes < 1,
		     "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
		     dig_port->max_lanes, intel_encoder->base.base.id,
		     intel_encoder->base.name))
		return false;

	intel_dp->reset_link_params = true;
	intel_dp->pps_pipe = INVALID_PIPE;
	intel_dp->active_pipe = INVALID_PIPE;

	/* Preserve the current hw state. */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_port_edp(dev_priv, port)) {
		/*
		 * Currently we don't support eDP on TypeC ports, although in
		 * theory it could work on TypeC legacy ports.
		 */
		drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;

		/* eDP only on port B and/or C on vlv/chv */
		if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
				      IS_CHERRYVIEW(dev_priv)) &&
				port != PORT_B && port != PORT_C))
			return false;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	intel_dp_set_source_rates(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->active_pipe = vlv_active_pipe(intel_dp);

	drm_dbg_kms(&dev_priv->drm,
		    "Adding %s connector on [ENCODER:%d:%s]\n",
		    type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		    intel_encoder->base.base.id, intel_encoder->base.name);

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	if (!HAS_GMCH(dev_priv))
		connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	if (INTEL_GEN(dev_priv) >= 11)
		connector->ycbcr_420_allowed = true;

	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
	intel_connector->polled = DRM_CONNECTOR_POLL_HPD;

	intel_dp_aux_init(intel_dp);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* init MST on ports that can support it */
	intel_dp_mst_encoder_init(dig_port,
				  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
		if (ret)
			drm_dbg_kms(&dev_priv->drm,
				    "HDCP init failed, skipping.\n");
	}

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G45(dev_priv)) {
		u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
		intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
			       (temp & ~0xf) | 0xd);
	}

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}

bool intel_dp_init(struct drm_i915_private *dev_priv,
		   i915_reg_t output_reg,
		   enum port port)
{
	struct intel_digital_port *dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
	if (!dig_port)
		return false;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &dig_port->base;
	encoder = &intel_encoder->base;

	if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
			     &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
			     "DP %c", port_name(port)))
		goto err_encoder_init;

	intel_encoder->hotplug = intel_dp_hotplug;
	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->update_pipe = intel_panel_update_backlight;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	if (IS_CHERRYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		intel_encoder->disable = g4x_disable_dp;
		intel_encoder->post_disable = g4x_post_disable_dp;
	}

	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A))
		dig_port->dp.set_link_train = cpt_set_link_train;
	else
		dig_port->dp.set_link_train = g4x_set_link_train;

	if (IS_CHERRYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = chv_set_signal_levels;
	else if (IS_VALLEYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = vlv_set_signal_levels;
	else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
	else if (IS_GEN(dev_priv, 6) && port == PORT_A)
		dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
	else
		dig_port->dp.set_signal_levels = g4x_set_signal_levels;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
	    (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
		dig_port->dp.preemph_max = intel_dp_preemph_max_3;
		dig_port->dp.voltage_max = intel_dp_voltage_max_3;
	} else {
		dig_port->dp.preemph_max = intel_dp_preemph_max_2;
		dig_port->dp.voltage_max = intel_dp_voltage_max_2;
	}

	dig_port->dp.output_reg = output_reg;
	dig_port->max_lanes = 4;
	dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
	dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);

	intel_encoder->type = INTEL_OUTPUT_DP;
	intel_encoder->power_domain = intel_port_to_power_domain(port);
	if (IS_CHERRYVIEW(dev_priv)) {
		if (port == PORT_D)
			intel_encoder->pipe_mask = BIT(PIPE_C);
		else
			intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
	} else {
		intel_encoder->pipe_mask = ~0;
	}
	intel_encoder->cloneable = 0;
	intel_encoder->port = port;

	dig_port->hpd_pulse = intel_dp_hpd_pulse;

	if (HAS_GMCH(dev_priv)) {
		if (IS_GM45(dev_priv))
			dig_port->connected = gm45_digital_port_connected;
		else
			dig_port->connected = g4x_digital_port_connected;
	} else {
		if (port == PORT_A)
			dig_port->connected = ilk_digital_port_connected;
		else
			dig_port->connected = ibx_digital_port_connected;
	}

	if (port != PORT_A)
		intel_infoframe_init(dig_port);

	dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
	if (!intel_dp_init_connector(dig_port, intel_connector))
		goto err_init_connector;

	return true;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(dig_port);
	return false;
}

void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp->can_mst)
			continue;

		if (intel_dp->is_mst)
			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
	}
}

void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;
		int ret;

		if (encoder->type != INTEL_OUTPUT_DDI)
			continue;

		intel_dp = enc_to_intel_dp(encoder);

		if (!intel_dp->can_mst)
			continue;

		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
						     true);
		if (ret) {
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							false);
		}
	}
}