/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>

#include "display/intel_dp.h"

#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_psr.h"
#include "intel_sprite.h"

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on
 * display panels which have a remote frame buffer (RFB) implemented
 * according to the PSR spec in eDP 1.3. PSR allows the display to go to
 * lower standby states when the system is idle but the display is on, as
 * it eliminates display refresh requests to DDR memory completely as long
 * as the frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, which is why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */
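
/*
 * Illustrative sketch, not part of the driver: the software frontbuffer
 * tracking described above is expected to bracket CPU frontbuffer
 * rendering roughly like this, with frontbuffer_bits being the dirty
 * mask for the buffer being scanned out:
 *
 *	intel_psr_invalidate(dev_priv, frontbuffer_bits, ORIGIN_CPU);
 *	... CPU renders to the frontbuffer ...
 *	intel_psr_flush(dev_priv, frontbuffer_bits, ORIGIN_CPU);
 *
 * Flips are handled by the hardware itself, which is why both functions
 * bail out early for ORIGIN_FLIP.
 */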

static bool psr_global_enabled(u32 debug)
{
	switch (debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DEFAULT:
		return i915_modparams.enable_psr;
	case I915_PSR_DEBUG_DISABLE:
		return false;
	default:
		return true;
	}
}

static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
			       const struct intel_crtc_state *crtc_state)
{
	/* Cannot enable DSC and PSR2 simultaneously */
	WARN_ON(crtc_state->dsc_params.compression_enable &&
		crtc_state->has_psr2);

	switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DISABLE:
	case I915_PSR_DEBUG_FORCE_PSR1:
		return false;
	default:
		return crtc_state->has_psr2;
	}
}

static int edp_psr_shift(enum transcoder cpu_transcoder)
{
	switch (cpu_transcoder) {
	case TRANSCODER_A:
		return EDP_PSR_TRANSCODER_A_SHIFT;
	case TRANSCODER_B:
		return EDP_PSR_TRANSCODER_B_SHIFT;
	case TRANSCODER_C:
		return EDP_PSR_TRANSCODER_C_SHIFT;
	default:
		MISSING_CASE(cpu_transcoder);
		/* fallthrough */
	case TRANSCODER_EDP:
		return EDP_PSR_TRANSCODER_EDP_SHIFT;
	}
}

static void psr_irq_control(struct drm_i915_private *dev_priv, u32 debug)
{
	u32 debug_mask, mask;
	enum transcoder cpu_transcoder;
	u32 transcoders = BIT(TRANSCODER_EDP);

	if (INTEL_GEN(dev_priv) >= 8)
		transcoders |= BIT(TRANSCODER_A) |
			       BIT(TRANSCODER_B) |
			       BIT(TRANSCODER_C);

	debug_mask = 0;
	mask = 0;
	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
		int shift = edp_psr_shift(cpu_transcoder);

		mask |= EDP_PSR_ERROR(shift);
		debug_mask |= EDP_PSR_POST_EXIT(shift) |
			      EDP_PSR_PRE_ENTRY(shift);
	}

	if (debug & I915_PSR_DEBUG_IRQ)
		mask |= debug_mask;

	I915_WRITE(EDP_PSR_IMR, ~mask);
}

static void psr_event_print(u32 val, bool psr2_enabled)
{
	DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
		DRM_DEBUG_KMS("\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		DRM_DEBUG_KMS("\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		DRM_DEBUG_KMS("\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		DRM_DEBUG_KMS("\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		DRM_DEBUG_KMS("\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		DRM_DEBUG_KMS("\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		DRM_DEBUG_KMS("\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		DRM_DEBUG_KMS("\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		DRM_DEBUG_KMS("\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		DRM_DEBUG_KMS("\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		DRM_DEBUG_KMS("\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
		DRM_DEBUG_KMS("\tPSR disabled\n");
}

void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
{
	u32 transcoders = BIT(TRANSCODER_EDP);
	enum transcoder cpu_transcoder;
	ktime_t time_ns = ktime_get();
	u32 mask = 0;

	if (INTEL_GEN(dev_priv) >= 8)
		transcoders |= BIT(TRANSCODER_A) |
			       BIT(TRANSCODER_B) |
			       BIT(TRANSCODER_C);

	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
		int shift = edp_psr_shift(cpu_transcoder);

		if (psr_iir & EDP_PSR_ERROR(shift)) {
			DRM_WARN("[transcoder %s] PSR aux error\n",
				 transcoder_name(cpu_transcoder));

			dev_priv->psr.irq_aux_error = true;

			/*
			 * If this interrupt is not masked it will keep
			 * firing so fast that it prevents the scheduled
			 * work from running.
			 * Also, after a PSR error we don't want to arm PSR
			 * again, so we don't care about unmasking the
			 * interrupt or clearing irq_aux_error.
			 */
			mask |= EDP_PSR_ERROR(shift);
		}

		if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
			dev_priv->psr.last_entry_attempt = time_ns;
			DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
				      transcoder_name(cpu_transcoder));
		}

		if (psr_iir & EDP_PSR_POST_EXIT(shift)) {
			dev_priv->psr.last_exit = time_ns;
			DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
				      transcoder_name(cpu_transcoder));

			if (INTEL_GEN(dev_priv) >= 9) {
				u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
				bool psr2_enabled = dev_priv->psr.psr2_enabled;

				I915_WRITE(PSR_EVENT(cpu_transcoder), val);
				psr_event_print(val, psr2_enabled);
			}
		}
	}

	if (mask) {
		mask |= I915_READ(EDP_PSR_IMR);
		I915_WRITE(EDP_PSR_IMR, mask);

		schedule_work(&dev_priv->psr.work);
	}
}

static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	u8 alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;
	return alpm_caps & DP_ALPM_CAP;
}

static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	u8 val = 8; /* assume the worst if we can't read the value */

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
		DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
	return val;
}

static u16 intel_dp_get_su_x_granularity(struct intel_dp *intel_dp)
{
	u16 val;
	ssize_t r;

	/*
	 * Return the default X granularity if granularity is not required or
	 * if the DPCD read fails.
	 */
	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
		return 4;

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
	if (r != 2)
		DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");

	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 */
	if (r != 2 || val == 0)
		val = 4;

	return val;
}
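
/*
 * Worked example (illustrative): with the default su_x_granularity of 4, a
 * 1920 pixel wide mode is acceptable for PSR2 (1920 % 4 == 0), while a
 * hypothetical panel requiring a granularity of 7 would reject it
 * (1920 % 7 != 0); see the corresponding check in intel_psr2_config_valid().
 */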

void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	if (dev_priv->psr.dp) {
		DRM_WARN("More than one eDP panel found, PSR support should be extended\n");
		return;
	}

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (!intel_dp->psr_dpcd[0])
		return;
	DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
		      intel_dp->psr_dpcd[0]);

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
		DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}

	dev_priv->psr.sink_support = true;
	dev_priv->psr.sink_sync_latency =
		intel_dp_get_sink_sync_latency(intel_dp);

	dev_priv->psr.dp = intel_dp;

	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC, but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way the panel is capable of doing selective
		 * updates without an aux frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h panels
		 * without the Y-coordinate requirement we would need to
		 * enable GTC first.
		 */
		dev_priv->psr.sink_psr2_support = y_req && alpm;
		DRM_DEBUG_KMS("PSR2 %ssupported\n",
			      dev_priv->psr.sink_psr2_support ? "" : "not ");

		if (dev_priv->psr.sink_psr2_support) {
			dev_priv->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
			dev_priv->psr.su_x_granularity =
				intel_dp_get_su_x_granularity(intel_dp);
		}
	}
}

static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct dp_sdp psr_vsc;

	if (dev_priv->psr.psr2_enabled) {
		/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		if (dev_priv->psr.colorimetry_support) {
			psr_vsc.sdp_header.HB2 = 0x5;
			psr_vsc.sdp_header.HB3 = 0x13;
		} else {
			psr_vsc.sdp_header.HB2 = 0x4;
			psr_vsc.sdp_header.HB3 = 0xe;
		}
	} else {
		/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		psr_vsc.sdp_header.HB2 = 0x2;
		psr_vsc.sdp_header.HB3 = 0x8;
	}

	intel_dig_port->write_infoframe(&intel_dig_port->base,
					crtc_state,
					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}

static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 aux_clock_divider, aux_ctl;
	int i;
	static const u8 aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,	/* AUX length field: number of data bytes - 1 */
		[4] = DP_SET_POWER_D0,
	};
	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
			   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
			   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
			   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= psr_aux_mask;
	I915_WRITE(EDP_PSR_AUX_CTL(dev_priv->psr.transcoder), aux_ctl);
}

static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 dpcd_val = DP_PSR_ENABLE;

	/* Enable ALPM at sink for psr2 */
	if (dev_priv->psr.psr2_enabled) {
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE);
		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
	} else {
		if (dev_priv->psr.link_standby)
			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;

		if (INTEL_GEN(dev_priv) >= 8)
			dpcd_val |= DP_PSR_CRC_VERIFICATION;
	}

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (INTEL_GEN(dev_priv) >= 11)
		val |= EDP_PSR_TP4_TIME_0US;

	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
		val |= EDP_PSR_TP1_TIME_0us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
		val |= EDP_PSR_TP1_TIME_100us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
	else
		val |= EDP_PSR_TP1_TIME_2500us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_0us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_2500us;

	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	return val;
}

static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	/* sink_sync_latency of 8 means the source has to wait for more than
	 * 8 frames, so we'll go with 9 frames for now.
	 */
	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (dev_priv->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	val |= intel_psr1_get_tp_time(intel_dp);

	if (INTEL_GEN(dev_priv) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	val |= (I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) &
		EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
	I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}
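
/*
 * Worked example (illustrative): with vbt.psr.idle_frames = 2 and
 * sink_sync_latency = 8, hsw_activate_psr1() above ends up with
 * idle_frames = max(max(6, 2), 8 + 1) = 9, i.e. nine idle frames before
 * the HW enters PSR1.
 */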

static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);

	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
	val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;

	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);

	if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
	    dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
		val |= EDP_PSR2_TP2_TIME_50us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR2_TP2_TIME_100us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR2_TP2_TIME_500us;
	else
		val |= EDP_PSR2_TP2_TIME_2500us;

	/*
	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
	 * recommends keeping this bit unset while PSR2 is enabled.
	 */
	I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), 0);

	I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
}

static bool
transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
{
	if (INTEL_GEN(dev_priv) < 9)
		return false;
	else if (INTEL_GEN(dev_priv) >= 12)
		return trans == TRANSCODER_A;
	else
		return trans == TRANSCODER_EDP;
}

static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0;

	if (!dev_priv->psr.sink_psr2_support)
		return false;

	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
		DRM_DEBUG_KMS("PSR2 not supported in transcoder %s\n",
			      transcoder_name(crtc_state->cpu_transcoder));
		return false;
	}

	/*
	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
	 * resolution requires DSC to be enabled, priority is given to DSC
	 * over PSR2.
	 */
	if (crtc_state->dsc_params.compression_enable) {
		DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
		return false;
	}

	if (INTEL_GEN(dev_priv) >= 12) {
		psr_max_h = 5120;
		psr_max_v = 3200;
	} else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		psr_max_h = 4096;
		psr_max_v = 2304;
	} else if (IS_GEN(dev_priv, 9)) {
		psr_max_h = 3640;
		psr_max_v = 2304;
	}

	if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
		DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			      crtc_hdisplay, crtc_vdisplay,
			      psr_max_h, psr_max_v);
		return false;
	}

	/*
	 * HW sends SU blocks of size four scan lines, which means the starting
	 * X coordinate and Y granularity requirements will always be met. We
	 * only need to validate that the SU block width is a multiple of the
	 * X granularity.
	 */
	if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
		DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
			      crtc_hdisplay, dev_priv->psr.su_x_granularity);
		return false;
	}

	if (crtc_state->crc_enabled) {
		DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n");
		return false;
	}

	return true;
}

void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int psr_setup_time;

	if (!CAN_PSR(dev_priv))
		return;

	if (intel_dp != dev_priv->psr.dp)
		return;

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms have an instance of PSR registers per transcoder,
	 * but for now the driver only supports one instance of PSR, so let's
	 * keep it hardcoded to PORT_A.
	 */
	if (dig_port->base.port != PORT_A) {
		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
		return;
	}

	if (dev_priv->psr.sink_not_reliable) {
		DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
		return;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced mode enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			      intel_dp->psr_dpcd[1]);
		return;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
			      psr_setup_time);
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder))
		WARN_ON(I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);

	WARN_ON(I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	/* PSR1 and PSR2 are mutually exclusive. */
	if (dev_priv->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	dev_priv->psr.active = true;
}

static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv,
					 enum transcoder cpu_transcoder)
{
	static const i915_reg_t regs[] = {
		[TRANSCODER_A] = CHICKEN_TRANS_A,
		[TRANSCODER_B] = CHICKEN_TRANS_B,
		[TRANSCODER_C] = CHICKEN_TRANS_C,
		[TRANSCODER_EDP] = CHICKEN_TRANS_EDP,
	};

	WARN_ON(INTEL_GEN(dev_priv) < 9);

	if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) ||
		    !regs[cpu_transcoder].reg))
		cpu_transcoder = TRANSCODER_A;

	return regs[cpu_transcoder];
}

static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 mask;

	/* Only HSW and BDW have PSR AUX registers that need to be setup.
	 * SKL+ use hardcoded values for PSR AUX transactions.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_psr_setup_aux(intel_dp);

	if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
					   !IS_GEMINILAKE(dev_priv))) {
		i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
							cpu_transcoder);
		u32 chicken = I915_READ(reg);

		chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
			   PSR2_ADD_VERTICAL_LINE_COUNT;
		I915_WRITE(reg, chicken);
	}

	/*
	 * Per spec: avoid continuous PSR exit by masking MEMUP and HPD.
	 * Also mask LPSP to avoid a dependency on other drivers that might
	 * block runtime_pm, besides preventing other HW tracking issues,
	 * now that we can rely on frontbuffer tracking.
	 */
	mask = EDP_PSR_DEBUG_MASK_MEMUP |
	       EDP_PSR_DEBUG_MASK_HPD |
	       EDP_PSR_DEBUG_MASK_LPSP |
	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;

	if (INTEL_GEN(dev_priv) < 11)
		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;

	I915_WRITE(EDP_PSR_DEBUG(dev_priv->psr.transcoder), mask);

	psr_irq_control(dev_priv, dev_priv->psr.debug);
}

static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = dev_priv->psr.dp;
	u32 val;

	WARN_ON(dev_priv->psr.enabled);

	dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
	dev_priv->psr.busy_frontbuffer_bits = 0;
	dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
	dev_priv->psr.transcoder = crtc_state->cpu_transcoder;

	/*
	 * If a PSR error happened and the driver is reloaded, EDP_PSR_IIR
	 * will still keep the error set even after the reset done in the
	 * irq_preinstall and irq_uninstall hooks. Enabling PSR in this
	 * situation causes the screen to freeze the first time the PSR HW
	 * tries to activate, so let's keep PSR disabled to avoid any
	 * rendering problems.
	 */
	val = I915_READ(EDP_PSR_IIR);
	val &= EDP_PSR_ERROR(edp_psr_shift(dev_priv->psr.transcoder));
	if (val) {
		dev_priv->psr.sink_not_reliable = true;
		DRM_DEBUG_KMS("PSR interruption error set, not enabling PSR\n");
		return;
	}

	DRM_DEBUG_KMS("Enabling PSR%s\n",
		      dev_priv->psr.psr2_enabled ? "2" : "1");
	intel_psr_setup_vsc(intel_dp, crtc_state);
	intel_psr_enable_sink(intel_dp);
	intel_psr_enable_source(intel_dp, crtc_state);
	dev_priv->psr.enabled = true;

	intel_psr_activate(intel_dp);
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	WARN_ON(dev_priv->drrs.dp);

	mutex_lock(&dev_priv->psr.lock);

	if (!psr_global_enabled(dev_priv->psr.debug)) {
		DRM_DEBUG_KMS("PSR disabled by flag\n");
		goto unlock;
	}

	intel_psr_enable_locked(dev_priv, crtc_state);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
	u32 val;

	if (!dev_priv->psr.active) {
		if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) {
			val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
			WARN_ON(val & EDP_PSR2_ENABLE);
		}

		val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
		WARN_ON(val & EDP_PSR_ENABLE);

		return;
	}

	if (dev_priv->psr.psr2_enabled) {
		val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
		WARN_ON(!(val & EDP_PSR2_ENABLE));
		val &= ~EDP_PSR2_ENABLE;
		I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
	} else {
		val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
		WARN_ON(!(val & EDP_PSR_ENABLE));
		val &= ~EDP_PSR_ENABLE;
		I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
	}
	dev_priv->psr.active = false;
}

static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t psr_status;
	u32 psr_status_mask;

	lockdep_assert_held(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		return;

	DRM_DEBUG_KMS("Disabling PSR%s\n",
		      dev_priv->psr.psr2_enabled ? "2" : "1");

	intel_psr_exit(dev_priv);

	if (dev_priv->psr.psr2_enabled) {
		psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder);
		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
	}

	/* Wait till PSR is idle */
	if (intel_de_wait_for_clear(dev_priv, psr_status,
				    psr_status_mask, 2000))
		DRM_ERROR("Timed out waiting for PSR idle state\n");

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	dev_priv->psr.enabled = false;
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling the pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	mutex_lock(&dev_priv->psr.lock);

	intel_psr_disable_locked(intel_dp);

	mutex_unlock(&dev_priv->psr.lock);
	cancel_work_sync(&dev_priv->psr.work);
}

static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		/*
		 * Display WA #0884: skl+
		 * This documented WA for bxt can be safely applied
		 * broadly so we can force HW tracking to exit PSR
		 * instead of disabling and re-enabling.
		 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
		 * but it makes more sense to write to the currently
		 * active pipe.
		 */
		I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
	else
		/*
		 * A write to CURSURFLIVE does not cause HW tracking to exit
		 * PSR on older gens, so do a manual exit instead.
		 */
		intel_psr_exit(dev_priv);
}

/**
 * intel_psr_update - Update PSR state
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function will update the PSR state, disabling, enabling or switching
 * PSR version when executing fastsets. For full modesets, intel_psr_disable()
 * and intel_psr_enable() should be called instead.
 */
void intel_psr_update(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	bool enable, psr2_enable;

	if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
		return;

	mutex_lock(&dev_priv->psr.lock);

	enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
	psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);

	if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
		if (crtc_state->crc_enabled && psr->enabled)
			psr_force_hw_tracking_exit(dev_priv);
		else if (INTEL_GEN(dev_priv) < 9 && psr->enabled) {
			/*
			 * Activate PSR again after a force exit when enabling
			 * CRC in older gens
			 */
			if (!dev_priv->psr.active &&
			    !dev_priv->psr.busy_frontbuffer_bits)
				schedule_work(&dev_priv->psr.work);
		}

		goto unlock;
	}

	if (psr->enabled)
		intel_psr_disable_locked(intel_dp);

	if (enable)
		intel_psr_enable_locked(dev_priv, crtc_state);

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_wait_for_idle - wait for PSR1 to idle
 * @new_crtc_state: new CRTC state
 * @out_value: PSR status in case of failure
 *
 * This function is expected to be called from pipe_update_start() where it is
 * not expected to race with PSR enable or disable.
 *
 * Returns: 0 on success or -ETIMEDOUT if PSR status does not idle.
 */
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
			    u32 *out_value)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
		return 0;

	/* FIXME: Update this for PSR2 if we need to wait for idle */
	if (READ_ONCE(dev_priv->psr.psr2_enabled))
		return 0;

	/*
	 * From bspec: Panel Self Refresh (BDW+)
	 * Max. time for PSR to idle = inverse of the refresh rate + 6 ms of
	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
	 * defensive enough to cover everything.
	 */

	return __intel_wait_for_register(&dev_priv->uncore,
					 EDP_PSR_STATUS(dev_priv->psr.transcoder),
					 EDP_PSR_STATUS_STATE_MASK,
					 EDP_PSR_STATUS_STATE_IDLE, 2, 50,
					 out_value);
}
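
/*
 * Illustrative arithmetic for the 50 ms timeout above: at a 60 Hz refresh
 * rate the worst case per the bspec formula is roughly 16.7 ms (one
 * refresh) + 6 ms (exit training) + 1.5 ms (aux handshake) ~= 24.2 ms, so
 * 50 ms leaves ample margin even for slower panels.
 */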

static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
{
	i915_reg_t reg;
	u32 mask;
	int err;

	if (!dev_priv->psr.enabled)
		return false;

	if (dev_priv->psr.psr2_enabled) {
		reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = EDP_PSR_STATUS(dev_priv->psr.transcoder);
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	mutex_unlock(&dev_priv->psr.lock);

	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
	if (err)
		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");

	/* After the unlocked wait, verify that PSR is still wanted! */
	mutex_lock(&dev_priv->psr.lock);
	return err == 0 && dev_priv->psr.enabled;
}

static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct drm_crtc *crtc;
	int err;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
	state->acquire_ctx = &ctx;

retry:
	drm_for_each_crtc(crtc, dev) {
		struct drm_crtc_state *crtc_state;
		struct intel_crtc_state *intel_crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			goto error;
		}

		intel_crtc_state = to_intel_crtc_state(crtc_state);

		if (crtc_state->active && intel_crtc_state->has_psr) {
			/* Mark mode as changed to trigger a pipe->update() */
			crtc_state->mode_changed = true;
			break;
		}
	}

	err = drm_atomic_commit(state);

error:
	if (err == -EDEADLK) {
		drm_atomic_state_clear(state);
		err = drm_modeset_backoff(&ctx);
		if (!err)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_atomic_state_put(state);

	return err;
}

int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
{
	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
	u32 old_mode;
	int ret;

	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
	    mode > I915_PSR_DEBUG_FORCE_PSR1) {
		DRM_DEBUG_KMS("Invalid debug mask %llx\n", val);
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&dev_priv->psr.lock);
	if (ret)
		return ret;

	old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
	dev_priv->psr.debug = val;
	psr_irq_control(dev_priv, dev_priv->psr.debug);

	mutex_unlock(&dev_priv->psr.lock);

	if (old_mode != mode)
		ret = intel_psr_fastset_force(dev_priv);

	return ret;
}
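
/*
 * Usage sketch (illustrative): intel_psr_debug_set() backs the
 * i915_edp_psr_debug debugfs file, so assuming the standard debugfs mount
 * point and card 0, the PSR mode can be overridden from userspace with e.g.:
 *
 *	# force PSR1 and enable the PSR entry/exit debug interrupts
 *	echo 0x13 > /sys/kernel/debug/dri/0/i915_edp_psr_debug
 *
 * where the low nibble selects the mode (I915_PSR_DEBUG_FORCE_PSR1 here)
 * and I915_PSR_DEBUG_IRQ (0x10) unmasks the debug interrupts.
 */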

static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
{
	struct i915_psr *psr = &dev_priv->psr;

	intel_psr_disable_locked(psr->dp);
	psr->sink_not_reliable = true;
	/* let's make sure the sink is awake */
	drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work);

	mutex_lock(&dev_priv->psr.lock);

	if (!dev_priv->psr.enabled)
		goto unlock;

	if (READ_ONCE(dev_priv->psr.irq_aux_error))
		intel_psr_handle_irq(dev_priv);

	/*
	 * We have to make sure PSR is ready for re-enable, otherwise it stays
	 * disabled until the next full enable/disable cycle. PSR might take
	 * some time to get fully disabled and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(dev_priv))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
		goto unlock;

	intel_psr_activate(dev_priv->psr.dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	if (!CAN_PSR(dev_priv))
		return;

	if (origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition flush = invalidate + flush */
	if (frontbuffer_bits)
		psr_force_hw_tracking_exit(dev_priv);

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		schedule_work(&dev_priv->psr.work);
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev_priv: i915 device private
 *
 * This function is called only once at driver load to initialize basic
 * PSR stuff.
 */
void intel_psr_init(struct drm_i915_private *dev_priv)
{
	if (!HAS_PSR(dev_priv))
		return;

	if (!dev_priv->psr.sink_support)
		return;

	if (IS_HASWELL(dev_priv))
		/*
		 * HSW doesn't have its PSR registers in the same space as the
		 * transcoder, so set this to a value that when subtracted
		 * from the register in transcoder space results in the right
		 * offset for HSW.
		 */
		dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;

	if (i915_modparams.enable_psr == -1)
		if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
			i915_modparams.enable_psr = 0;

	/* Set link_standby x link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		dev_priv->psr.link_standby = false;
	else
		/* For new platforms let's respect VBT back again */
		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

	INIT_WORK(&dev_priv->psr.work, intel_psr_work);
	mutex_init(&dev_priv->psr.lock);
}
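
/*
 * Rough lifecycle sketch (illustrative, not a strict contract): at driver
 * load intel_psr_init() sets up the work item and lock, and
 * intel_psr_init_dpcd() probes the sink capabilities when the eDP
 * connector is initialized. Atomic checks then go through
 * intel_psr_compute_config(), and commits call intel_psr_enable(),
 * intel_psr_update() or intel_psr_disable() as appropriate.
 */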

void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_psr *psr = &dev_priv->psr;
	u8 val;
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled || psr->dp != intel_dp)
		goto exit;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
		DRM_ERROR("PSR_STATUS dpcd read failed\n");
		goto exit;
	}

	if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
		DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
		DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n");
		goto exit;
	}

	if (val & DP_PSR_RFB_STORAGE_ERROR)
		DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
	if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (val & DP_PSR_LINK_CRC_ERROR)
		DRM_ERROR("PSR Link CRC error, disabling PSR\n");

	if (val & ~errors)
		DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
			  val & ~errors);
	if (val & errors) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}
	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
exit:
	mutex_unlock(&psr->lock);
}

bool intel_psr_enabled(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	bool ret;

	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
		return false;

	mutex_lock(&dev_priv->psr.lock);
	ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
	mutex_unlock(&dev_priv->psr.lock);

	return ret;
}