/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>

#include "display/intel_dp.h"

#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_hdmi.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "skl_universal_plane.h"

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the Display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP1.3. The PSR feature allows the display to go to lower
 * standby states when the system is idle but the display is on, as it
 * eliminates display refresh requests to DDR memory completely as long as the
 * frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 *
 * DC3CO (DC3 clock off)
 *
 * On top of PSR2, GEN12 adds an intermediate power saving state that turns
 * the clock off automatically during PSR2 idle state.
 * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
 * entry/exit allows the HW to enter a low-power state even when page flipping
 * periodically (for instance a 30fps video playback scenario).
 *
 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was in
 * it), DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6
 * frames. If no other flip occurs and that work is executed, DC3CO is disabled
 * and PSR2 is configured to enter deep sleep again, resetting in case of
 * another flip.
 * Front buffer modifications do not trigger DC3CO activation on purpose as it
 * would bring a lot of complexity and most modern systems will only use page
 * flips.
 */

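/*
 * Note: intel_dp->psr.debug is set at runtime through the PSR debug interface
 * (debugfs); with I915_PSR_DEBUG_DEFAULT the enable_psr module parameter
 * decides, any other debug mode overrides it.
 */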
85
static bool psr_global_enabled(struct intel_dp *intel_dp)
86
{
87 88 89
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
90
	case I915_PSR_DEBUG_DEFAULT:
91
		return i915->params.enable_psr;
92 93 94 95 96 97 98
	case I915_PSR_DEBUG_DISABLE:
		return false;
	default:
		return true;
	}
}

99
static bool psr2_global_enabled(struct intel_dp *intel_dp)
100
{
101
	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
102
	case I915_PSR_DEBUG_DISABLE:
103 104 105
	case I915_PSR_DEBUG_FORCE_PSR1:
		return false;
	default:
106
		return true;
107 108 109
	}
}

110
static void psr_irq_control(struct intel_dp *intel_dp)
111
{
112
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
113 114
	enum transcoder trans_shift;
	i915_reg_t imr_reg;
115
	u32 mask, val;
116

117 118 119 120 121
	/*
	 * gen12+ has registers relative to transcoder and one per transcoder
	 * using the same bit definition: handle it as TRANSCODER_EDP to force
	 * 0 shift in bit definition
	 */
122
	if (DISPLAY_VER(dev_priv) >= 12) {
123
		trans_shift = 0;
124
		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
125
	} else {
126
		trans_shift = intel_dp->psr.transcoder;
127 128 129 130
		imr_reg = EDP_PSR_IMR;
	}

	mask = EDP_PSR_ERROR(trans_shift);
131
	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
132 133
		mask |= EDP_PSR_POST_EXIT(trans_shift) |
			EDP_PSR_PRE_ENTRY(trans_shift);
134 135

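	/*
	 * In the PSR IMR a set bit masks the corresponding event, so writing
	 * ~mask below leaves only the events selected above unmasked.
	 */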
	/* Warning: it is masking/setting reserved bits too */
136
	val = intel_de_read(dev_priv, imr_reg);
137
	val &= ~EDP_PSR_TRANS_MASK(trans_shift);
138
	val |= ~mask;
139
	intel_de_write(dev_priv, imr_reg, val);
140 141
}

142 143
static void psr_event_print(struct drm_i915_private *i915,
			    u32 val, bool psr2_enabled)
144
{
145
	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
146
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
147
		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
148
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
149
		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
150
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
151
		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
152
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
153
		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
154
	if (val & PSR_EVENT_GRAPHICS_RESET)
155
		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
156
	if (val & PSR_EVENT_PCH_INTERRUPT)
157
		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
158
	if (val & PSR_EVENT_MEMORY_UP)
159
		drm_dbg_kms(&i915->drm, "\tMemory up\n");
160
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
161
		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
162
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
163
		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
164
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
165
		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
166
	if (val & PSR_EVENT_REGISTER_UPDATE)
167
		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
168
	if (val & PSR_EVENT_HDCP_ENABLE)
169
		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
170
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
171
		drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
172
	if (val & PSR_EVENT_VBI_ENABLE)
173
		drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
174
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
175
		drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
176
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
177
		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
178 179
}

180
void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
181
{
182 183 184
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	ktime_t time_ns =  ktime_get();
185 186
	enum transcoder trans_shift;
	i915_reg_t imr_reg;
187

188
	if (DISPLAY_VER(dev_priv) >= 12) {
189
		trans_shift = 0;
190
		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
191
	} else {
192
		trans_shift = intel_dp->psr.transcoder;
193 194 195 196
		imr_reg = EDP_PSR_IMR;
	}

	if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
197
		intel_dp->psr.last_entry_attempt = time_ns;
198 199 200
		drm_dbg_kms(&dev_priv->drm,
			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
			    transcoder_name(cpu_transcoder));
201
	}
202

203
	if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
204
		intel_dp->psr.last_exit = time_ns;
205 206 207
		drm_dbg_kms(&dev_priv->drm,
			    "[transcoder %s] PSR exit completed\n",
			    transcoder_name(cpu_transcoder));
208

209
		if (DISPLAY_VER(dev_priv) >= 9) {
210 211
			u32 val = intel_de_read(dev_priv,
						PSR_EVENT(cpu_transcoder));
212
			bool psr2_enabled = intel_dp->psr.psr2_enabled;
213

214 215
			intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
				       val);
216
			psr_event_print(dev_priv, val, psr2_enabled);
217
		}
218
	}
219

220
	if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
221
		u32 val;
222

223
		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
224
			 transcoder_name(cpu_transcoder));
225

226
		intel_dp->psr.irq_aux_error = true;
227

228 229 230 231 232 233 234 235
		/*
		 * If this interrupt is not masked it will keep firing so fast
		 * that it prevents the scheduled work from running.
		 * Also, after a PSR error we don't want to arm PSR again, so
		 * we don't care about unmasking the interrupt or clearing
		 * irq_aux_error.
		 */
236
		val = intel_de_read(dev_priv, imr_reg);
237
		val |= EDP_PSR_ERROR(trans_shift);
238
		intel_de_write(dev_priv, imr_reg, val);
239

240
		schedule_work(&intel_dp->psr.work);
241
	}
242 243
}

244 245
static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
246
	u8 alpm_caps = 0;
247 248 249 250 251 252 253

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;
	return alpm_caps & DP_ALPM_CAP;
}

254 255
static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
256
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
257
	u8 val = 8; /* assume the worst if we can't read the value */
258 259 260 261 262

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
263 264
		drm_dbg_kms(&i915->drm,
			    "Unable to get sink synchronization latency, assuming 8 frames\n");
265 266 267
	return val;
}

268 269
static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
{
270
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
271 272 273 274 275 276 277 278 279 280 281 282
	u16 val;
	ssize_t r;

	/*
	 * Return the default X granularity if granularity is not required or
	 * if the DPCD read fails.
	 */
	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
		return 4;

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
	if (r != 2)
283 284
		drm_dbg_kms(&i915->drm,
			    "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
285 286 287 288 289 290 291 292 293 294 295

	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 */
	if (r != 2 || val == 0)
		val = 4;

	return val;
}

296 297 298 299 300 301 302 303
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

304 305
	if (!intel_dp->psr_dpcd[0])
		return;
306 307
	drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
		    intel_dp->psr_dpcd[0]);
308

309
	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
310 311
		drm_dbg_kms(&dev_priv->drm,
			    "PSR support not currently available for this panel\n");
312 313 314
		return;
	}

315
	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
316 317
		drm_dbg_kms(&dev_priv->drm,
			    "Panel lacks power state control, PSR cannot be enabled\n");
318 319
		return;
	}
320

321 322
	intel_dp->psr.sink_support = true;
	intel_dp->psr.sink_sync_latency =
323
		intel_dp_get_sink_sync_latency(intel_dp);
324

325
	if (DISPLAY_VER(dev_priv) >= 9 &&
326
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
327 328 329 330
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

331 332 333 334 335 336 337 338 339 340 341
		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC, but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way the panel is capable of doing selective
		 * updates without an aux frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h panels
		 * without the Y-coordinate requirement we would need to
		 * enable GTC first.
		 */
342
		intel_dp->psr.sink_psr2_support = y_req && alpm;
343
		drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
344
			    intel_dp->psr.sink_psr2_support ? "" : "not ");
345

346 347
		if (intel_dp->psr.sink_psr2_support) {
			intel_dp->psr.colorimetry_support =
348
				intel_dp_get_colorimetry_status(intel_dp);
349
			intel_dp->psr.su_x_granularity =
350
				intel_dp_get_su_x_granulartiy(intel_dp);
351 352 353 354
		}
	}
}

355
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
357
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
358 359
	u32 aux_clock_divider, aux_ctl;
	int i;
360
	static const u8 aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
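	/*
	 * The message above is a native AUX write of DP_SET_POWER = D0; the
	 * PSR HW is expected to replay it to wake the sink when exiting PSR.
	 */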
367 368 369 370
	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
			   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
			   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
			   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
373
	for (i = 0; i < sizeof(aux_msg); i += 4)
374
		intel_de_write(dev_priv,
375
			       EDP_PSR_AUX_DATA(intel_dp->psr.transcoder, i >> 2),
376
			       intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
377

378 379 380
	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
381
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
382
					     aux_clock_divider);
383 384 385

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= psr_aux_mask;
386
	intel_de_write(dev_priv, EDP_PSR_AUX_CTL(intel_dp->psr.transcoder),
387
		       aux_ctl);
388 389
}

390
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
391
{
392
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
393
	u8 dpcd_val = DP_PSR_ENABLE;
394

395
	/* Enable ALPM at sink for psr2 */
396
	if (intel_dp->psr.psr2_enabled) {
397
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
398 399 400
				   DP_ALPM_ENABLE |
				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);

401
		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
402
	} else {
403
		if (intel_dp->psr.link_standby)
404
			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
405

406
		if (DISPLAY_VER(dev_priv) >= 8)
407
			dpcd_val |= DP_PSR_CRC_VERIFICATION;
408 409
	}

410
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
411

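	/* The sink is expected to be in D0 while PSR is in use, so make sure of it. */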
412
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

415
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
417
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
418
	u32 val = 0;
419

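	/*
	 * TP1/TP2/TP3 are the training patterns sent while waking the link on
	 * PSR exit; pick the smallest HW setting that still covers the VBT
	 * wake-up budget (or the safest values when psr_safest_params is set).
	 */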
420
	if (DISPLAY_VER(dev_priv) >= 11)
421 422
		val |= EDP_PSR_TP4_TIME_0US;

423
	if (dev_priv->params.psr_safest_params) {
424 425 426 427 428
		val |= EDP_PSR_TP1_TIME_2500us;
		val |= EDP_PSR_TP2_TP3_TIME_2500us;
		goto check_tp3_sel;
	}

429
	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
430
		val |= EDP_PSR_TP1_TIME_0us;
431
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
432
		val |= EDP_PSR_TP1_TIME_100us;
433 434
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
435
	else
436
		val |= EDP_PSR_TP1_TIME_2500us;
437

438
	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
439
		val |= EDP_PSR_TP2_TP3_TIME_0us;
440
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
441
		val |= EDP_PSR_TP2_TP3_TIME_100us;
442 443
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
444
	else
445
		val |= EDP_PSR_TP2_TP3_TIME_2500us;
446

447
check_tp3_sel:
448 449 450 451 452 453
	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

454 455 456
	return val;
}

457
static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
458 459
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
460
	int idle_frames;
461 462 463 464

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
465
	idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
466
	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
467

468
	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
469 470 471 472 473 474 475 476 477 478 479 480
		idle_frames = 0xf;

	return idle_frames;
}

static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT;
481 482 483 484 485

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

486
	if (intel_dp->psr.link_standby)
487 488 489 490
		val |= EDP_PSR_LINK_STANDBY;

	val |= intel_psr1_get_tp_time(intel_dp);

491
	if (DISPLAY_VER(dev_priv) >= 8)
492 493
		val |= EDP_PSR_CRC_ENABLE;

494
	val |= (intel_de_read(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder)) &
495
		EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
496
	intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), val);
497
}
498

499
static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
500
{
501
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
502
	u32 val = 0;
503

504
	if (dev_priv->params.psr_safest_params)
505
		return EDP_PSR2_TP2_TIME_2500us;
506

507 508
	if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
	    dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
509
		val |= EDP_PSR2_TP2_TIME_50us;
510
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
511
		val |= EDP_PSR2_TP2_TIME_100us;
512
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
513
		val |= EDP_PSR2_TP2_TIME_500us;
514
	else
515
		val |= EDP_PSR2_TP2_TIME_2500us;
516

517 518 519 520 521 522 523 524 525 526 527
	return val;
}

static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val;

	val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;

	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
528
	if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
529 530
		val |= EDP_Y_COORDINATE_ENABLE;

531
	val |= EDP_PSR2_FRAME_BEFORE_SU(intel_dp->psr.sink_sync_latency + 1);
532 533
	val |= intel_psr2_get_tp_time(intel_dp);

534
	if (DISPLAY_VER(dev_priv) >= 12) {
535 536 537 538 539 540 541 542 543 544
		/*
		 * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are the
		 * default values from BSpec. In order to set an optimal power
		 * consumption, modes lower than 4k resolution need to decrease
		 * IO_BUFFER_WAKE and FAST_WAKE, and modes higher than 4k
		 * resolution need to increase IO_BUFFER_WAKE and FAST_WAKE.
		 */
		val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
		val |= TGL_EDP_PSR2_FAST_WAKE(7);
545
	} else if (DISPLAY_VER(dev_priv) >= 9) {
546 547 548 549
		val |= EDP_PSR2_IO_BUFFER_WAKE(7);
		val |= EDP_PSR2_FAST_WAKE(7);
	}

550
	if (intel_dp->psr.psr2_sel_fetch_enabled) {
551
		/* WA 1408330847 */
552
		if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0) ||
553 554 555 556 557
		    IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0))
			intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
				     DIS_RAM_BYPASS_PSR2_MAN_TRACK,
				     DIS_RAM_BYPASS_PSR2_MAN_TRACK);

558
		intel_de_write(dev_priv,
559
			       PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
560
			       PSR2_MAN_TRK_CTL_ENABLE);
561
	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
562
		intel_de_write(dev_priv,
563
			       PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0);
564
	}
565

566
	/*
	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
	 * recommends keeping this bit unset while PSR2 is enabled.
	 */
570
	intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), 0);
571

572
	intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
}

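/*
 * PSR2 HW lives on a single transcoder: the eDP transcoder on display
 * versions 9-11 and transcoder A on display version 12+.
 */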
575 576 577
static bool
transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
{
578
	if (DISPLAY_VER(dev_priv) < 9)
579
		return false;
580
	else if (DISPLAY_VER(dev_priv) >= 12)
581 582 583 584 585
		return trans == TRANSCODER_A;
	else
		return trans == TRANSCODER_EDP;
}

586 587
static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
{
588
	if (!cstate || !cstate->hw.active)
589 590 591
		return 0;

	return DIV_ROUND_UP(1000 * 1000,
592
			    drm_mode_vrefresh(&cstate->hw.adjusted_mode));
593 594
}

595
static void psr2_program_idle_frames(struct intel_dp *intel_dp,
596 597
				     u32 idle_frames)
{
598
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
599 600 601
	u32 val;

	idle_frames <<=  EDP_PSR2_IDLE_FRAME_SHIFT;
602
	val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder));
603 604
	val &= ~EDP_PSR2_IDLE_FRAME_MASK;
	val |= idle_frames;
605
	intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
606 607
}

608
static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
609
{
610 611 612
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	psr2_program_idle_frames(intel_dp, 0);
613 614 615
	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

616
static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
617
{
618
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
619 620

	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
621
	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
622 623
}

624
static void tgl_dc3co_disable_work(struct work_struct *work)
625
{
626 627
	struct intel_dp *intel_dp =
		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
628

629
	mutex_lock(&intel_dp->psr.lock);
630
	/* If delayed work is pending, it is not idle */
631
	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
632 633
		goto unlock;

634
	tgl_psr2_disable_dc3co(intel_dp);
635
unlock:
636
	mutex_unlock(&intel_dp->psr.lock);
637 638
}

639
static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
640
{
641
	if (!intel_dp->psr.dc3co_exitline)
642 643
		return;

644
	cancel_delayed_work(&intel_dp->psr.dc3co_work);
645
	/* Before PSR2 exit disallow dc3co*/
646
	tgl_psr2_disable_dc3co(intel_dp);
647 648
}

649 650 651 652 653 654 655 656 657 658 659 660 661 662 663
static bool
dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum port port = dig_port->base.port;

	if (IS_ALDERLAKE_P(dev_priv))
		return pipe <= PIPE_B && port <= PORT_B;
	else
		return pipe == PIPE_A && port == PORT_A;
}

664 665 666 667 668 669 670 671
static void
tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *crtc_state)
{
	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 exit_scanlines;

672 673 674 675 676 677 678
	/*
	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
	 * disable DC3CO until the changed dc3co activating/deactivating sequence
	 * is applied. B.Specs:49196
	 */
	return;

679 680 681 682 683 684 685
	/*
	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch.
	 * TODO: when the issue is addressed, this restriction should be removed.
	 */
	if (crtc_state->enable_psr2_sel_fetch)
		return;

686
	if (!(dev_priv->dmc.allowed_dc_mask & DC_STATE_EN_DC3CO))
687 688
		return;

689
	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
690 691 692 693 694 695 696 697 698
		return;

	/*
	 * DC3CO Exit time 200us B.Spec 49196
	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
	 */
	exit_scanlines =
		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;

699
	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
700 701 702 703 704
		return;

	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
}

705 706 707 708 709 710 711 712 713
static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
					      struct intel_crtc_state *crtc_state)
{
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;

714 715
	if (!dev_priv->params.enable_psr2_sel_fetch &&
	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 sel fetch not enabled, disabled by parameter\n");
		return false;
	}

	if (crtc_state->uapi.async_flip) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 sel fetch not enabled, async flip enabled\n");
		return false;
	}

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane_state->uapi.rotation != DRM_MODE_ROTATE_0) {
			drm_dbg_kms(&dev_priv->drm,
				    "PSR2 sel fetch not enabled, plane rotated\n");
			return false;
		}
	}

735 736 737 738 739 740 741
	/* Wa_14010254185 Wa_14010103792 */
	if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B1)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 sel fetch not enabled, missing the implementation of WAs\n");
		return false;
	}

742 743 744
	return crtc_state->enable_psr2_sel_fetch = true;
}

745 746 747
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
748
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
749 750
	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
751
	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
752

753
	if (!intel_dp->psr.sink_psr2_support)
754 755
		return false;

756 757 758 759 760 761
	/* JSL and EHL only supports eDP 1.3 */
	if (IS_JSL_EHL(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
		return false;
	}

762 763 764 765 766 767
	/* Wa_16011181250 */
	if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
		return false;
	}

768 769 770 771 772 773 774 775 776
	/*
	 * We are missing the implementation of some workarounds to enable PSR2
	 * in Alderlake_P; until they are ready PSR2 should be kept disabled.
	 */
	if (IS_ALDERLAKE_P(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 is missing the implementation of workarounds\n");
		return false;
	}

777
	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
778 779 780
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not supported in transcoder %s\n",
			    transcoder_name(crtc_state->cpu_transcoder));
781 782 783
		return false;
	}

784
	if (!psr2_global_enabled(intel_dp)) {
785 786 787 788
		drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
		return false;
	}

789 790 791 792 793
	/*
	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
	 * resolution requires DSC to be enabled, priority is given to DSC
	 * over PSR2.
	 */
794
	if (crtc_state->dsc.compression_enable) {
795 796
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 cannot be enabled since DSC is enabled\n");
797 798 799
		return false;
	}

800 801 802 803 804 805
	if (crtc_state->crc_enabled) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
		return false;
	}

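	/* Maximum PSR2 resolution and pipe bpp supported by the HW, per display version. */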
806
	if (DISPLAY_VER(dev_priv) >= 12) {
807 808
		psr_max_h = 5120;
		psr_max_v = 3200;
809
		max_bpp = 30;
810
	} else if (DISPLAY_VER(dev_priv) >= 10) {
811 812
		psr_max_h = 4096;
		psr_max_v = 2304;
813
		max_bpp = 24;
814
	} else if (DISPLAY_VER(dev_priv) == 9) {
815 816
		psr_max_h = 3640;
		psr_max_v = 2304;
817
		max_bpp = 24;
818 819
	}

820
	if (crtc_state->pipe_bpp > max_bpp) {
821 822 823
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
			    crtc_state->pipe_bpp, max_bpp);
824 825 826
		return false;
	}

827 828 829
	/*
	 * HW sends SU blocks of size four scan lines, which means the starting
	 * X coordinate and Y granularity requirements will always be met. We
	 * only need to validate that the SU block width is a multiple of
	 * the X granularity.
	 */
833
	if (crtc_hdisplay % intel_dp->psr.su_x_granularity) {
834 835
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
836
			    crtc_hdisplay, intel_dp->psr.su_x_granularity);
837 838 839
		return false;
	}

840 841 842 843 844 845 846
	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
		    !HAS_PSR_HW_TRACKING(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
			return false;
		}
847 848
	}

849 850 851 852 853 854 855
	/* Wa_2209313811 */
	if (!crtc_state->enable_psr2_sel_fetch &&
	    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B1)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported on this Display stepping\n");
		return false;
	}

856 857
	if (!crtc_state->enable_psr2_sel_fetch &&
	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
858 859 860 861
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			    crtc_hdisplay, crtc_vdisplay,
			    psr_max_h, psr_max_v);
862 863 864
		return false;
	}

865
	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
866 867 868
	return true;
}

869 870
void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
872
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
873
	const struct drm_display_mode *adjusted_mode =
874
		&crtc_state->hw.adjusted_mode;
875
	int psr_setup_time;

877 878 879 880 881 882 883
	/*
	 * Current PSR panels don't work reliably with VRR enabled,
	 * so if VRR is enabled, do not enable PSR.
	 */
	if (crtc_state->vrr.enable)
		return;

884
	if (!CAN_PSR(intel_dp))
885 886
		return;

887
	if (!psr_global_enabled(intel_dp)) {
888
		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
889
		return;
890 891
	}

892
	if (intel_dp->psr.sink_not_reliable) {
893 894
		drm_dbg_kms(&dev_priv->drm,
			    "PSR sink implementation is not reliable\n");
895 896 897
		return;
	}

898
	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
899 900
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Interlaced mode enabled\n");
901
		return;
	}

904 905
	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
906 907 908
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			    intel_dp->psr_dpcd[1]);
909
		return;
910 911 912 913
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
914 915 916
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: PSR setup time (%d us) too long\n",
			    psr_setup_time);
917 918 919 920
		return;
	}

	crtc_state->has_psr = true;
921
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
922
	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
}

925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969
void intel_psr_get_config(struct intel_encoder *encoder,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp;
	u32 val;

	if (!dig_port)
		return;

	intel_dp = &dig_port->dp;
	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&intel_dp->psr.lock);
	if (!intel_dp->psr.enabled)
		goto unlock;

	/*
	 * Not possible to read EDP_PSR/PSR2_CTL registers as it is
	 * enabled/disabled because of frontbuffer tracking and others.
	 */
	pipe_config->has_psr = true;
	pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
	pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);

	if (!intel_dp->psr.psr2_enabled)
		goto unlock;

	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
		if (val & PSR2_MAN_TRK_CTL_ENABLE)
			pipe_config->enable_psr2_sel_fetch = true;
	}

	if (DISPLAY_VER(dev_priv) >= 12) {
		val = intel_de_read(dev_priv, EXITLINE(intel_dp->psr.transcoder));
		val &= EXITLINE_MASK;
		pipe_config->dc3co_exitline = val;
	}
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}

970
static void intel_psr_activate(struct intel_dp *intel_dp)
{
972
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
973
	enum transcoder transcoder = intel_dp->psr.transcoder;

975
	if (transcoder_has_psr2(dev_priv, transcoder))
976
		drm_WARN_ON(&dev_priv->drm,
977
			    intel_de_read(dev_priv, EDP_PSR2_CTL(transcoder)) & EDP_PSR2_ENABLE);
978

979
	drm_WARN_ON(&dev_priv->drm,
980 981 982
		    intel_de_read(dev_priv, EDP_PSR_CTL(transcoder)) & EDP_PSR_ENABLE);
	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
	lockdep_assert_held(&intel_dp->psr.lock);

984
	/* psr1 and psr2 are mutually exclusive.*/
985
	if (intel_dp->psr.psr2_enabled)
986 987 988 989
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

990
	intel_dp->psr.active = true;
}

993
static void intel_psr_enable_source(struct intel_dp *intel_dp)
994
{
995
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
996
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
997
	u32 mask;
998

999 1000 1001 1002 1003 1004
	/* Only HSW and BDW have PSR AUX registers that need to be set up.
	 * SKL+ use hardcoded values for PSR AUX transactions.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_psr_setup_aux(intel_dp);

1005
	if (intel_dp->psr.psr2_enabled && DISPLAY_VER(dev_priv) == 9) {
1006
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
1007
		u32 chicken = intel_de_read(dev_priv, reg);
1008

1009 1010
		chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
			   PSR2_ADD_VERTICAL_LINE_COUNT;
1011
		intel_de_write(dev_priv, reg, chicken);
1012
	}
1013 1014 1015 1016 1017 1018 1019

	/*
	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
	 * mask LPSP to avoid a dependency on other drivers that might block
	 * runtime_pm, besides preventing other hw tracking issues, now that
	 * we can rely on frontbuffer tracking.
	 */
1020 1021 1022 1023 1024
	mask = EDP_PSR_DEBUG_MASK_MEMUP |
	       EDP_PSR_DEBUG_MASK_HPD |
	       EDP_PSR_DEBUG_MASK_LPSP |
	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;

1025
	if (DISPLAY_VER(dev_priv) < 11)
1026 1027
		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;

1028
	intel_de_write(dev_priv, EDP_PSR_DEBUG(intel_dp->psr.transcoder),
1029
		       mask);
1030

1031
	psr_irq_control(intel_dp);
1032

1033
	if (intel_dp->psr.dc3co_exitline) {
1034 1035 1036 1037 1038 1039
		u32 val;

		/*
		 * TODO: if future platforms supports DC3CO in more than one
		 * transcoder, EXITLINE will need to be unset when disabling PSR
		 */
1040
		val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder));
1041
		val &= ~EXITLINE_MASK;
1042
		val |= intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT;
1043
		val |= EXITLINE_ENABLE;
1044
		intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
1045
	}
1046

1047
	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1048
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
1049
			     intel_dp->psr.psr2_sel_fetch_enabled ?
1050
			     IGNORE_PSR2_HW_TRACKING : 0);
1051 1052
}

1053
static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1054
{
1055
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1056
	u32 val;
1057

1058 1059 1060 1061 1062 1063 1064 1065
	/*
	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
	 * will still keep the error set even after the reset done in the
	 * irq_preinstall and irq_uninstall hooks.
	 * And enabling PSR in this situation causes the screen to freeze the
	 * first time that the PSR HW tries to activate, so let's keep PSR
	 * disabled to avoid any rendering problems.
	 */
1066
	if (DISPLAY_VER(dev_priv) >= 12) {
1067
		val = intel_de_read(dev_priv,
1068
				    TRANS_PSR_IIR(intel_dp->psr.transcoder));
1069 1070
		val &= EDP_PSR_ERROR(0);
	} else {
1071
		val = intel_de_read(dev_priv, EDP_PSR_IIR);
1072
		val &= EDP_PSR_ERROR(intel_dp->psr.transcoder);
1073
	}
1074
	if (val) {
1075
		intel_dp->psr.sink_not_reliable = true;
1076 1077
		drm_dbg_kms(&dev_priv->drm,
			    "PSR interruption error set, not enabling PSR\n");
1078
		return false;
1079
	}
1080

1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107
	return true;
}

static void intel_psr_enable_locked(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state,
				    const struct drm_connector_state *conn_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	u32 val;

	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);

	intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
	intel_dp->psr.busy_frontbuffer_bits = 0;
	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
	/* DC5/DC6 requires at least 6 idle frames */
	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
	intel_dp->psr.dc3co_exit_delay = val;
	intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;

	if (!psr_interrupt_error_check(intel_dp))
		return;

1108
	drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1109
		    intel_dp->psr.psr2_enabled ? "2" : "1");
1110
	intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
1111 1112
				     &intel_dp->psr.vsc);
	intel_write_dp_vsc_sdp(encoder, crtc_state, &intel_dp->psr.vsc);
1113
	intel_psr_enable_sink(intel_dp);
1114
	intel_psr_enable_source(intel_dp);
1115
	intel_dp->psr.enabled = true;
1116
	intel_dp->psr.paused = false;
1117 1118 1119 1120

	intel_psr_activate(intel_dp);
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
1124
 * @crtc_state: new CRTC state
1125
 * @conn_state: new CONNECTOR state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
1129
void intel_psr_enable(struct intel_dp *intel_dp,
1130 1131
		      const struct intel_crtc_state *crtc_state,
		      const struct drm_connector_state *conn_state)
{
1133
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

1135
	if (!CAN_PSR(intel_dp))
		return;

1138
	if (!crtc_state->has_psr)
1139 1140
		return;

1141
	drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp);
1142

1143 1144 1145
	mutex_lock(&intel_dp->psr.lock);
	intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
	mutex_unlock(&intel_dp->psr.lock);
}

1148
static void intel_psr_exit(struct intel_dp *intel_dp)
1149
{
1150
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1151 1152
	u32 val;

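	/*
	 * If PSR was never activated, only sanity check that the HW enable
	 * bits are indeed clear before returning.
	 */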
1153 1154
	if (!intel_dp->psr.active) {
		if (transcoder_has_psr2(dev_priv, intel_dp->psr.transcoder)) {
1155
			val = intel_de_read(dev_priv,
1156
					    EDP_PSR2_CTL(intel_dp->psr.transcoder));
1157
			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1158 1159
		}

1160
		val = intel_de_read(dev_priv,
1161
				    EDP_PSR_CTL(intel_dp->psr.transcoder));
1162
		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1163

1164
		return;
1165
	}
1166

1167 1168
	if (intel_dp->psr.psr2_enabled) {
		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1169
		val = intel_de_read(dev_priv,
1170
				    EDP_PSR2_CTL(intel_dp->psr.transcoder));
1171
		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1172
		val &= ~EDP_PSR2_ENABLE;
1173
		intel_de_write(dev_priv,
1174
			       EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
1175
	} else {
1176
		val = intel_de_read(dev_priv,
1177
				    EDP_PSR_CTL(intel_dp->psr.transcoder));
1178
		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1179
		val &= ~EDP_PSR_ENABLE;
1180
		intel_de_write(dev_priv,
1181
			       EDP_PSR_CTL(intel_dp->psr.transcoder), val);
1182
	}
1183
	intel_dp->psr.active = false;
1184 1185
}

1186
static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1187
{
1188
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1189 1190
	i915_reg_t psr_status;
	u32 psr_status_mask;

1192 1193
	if (intel_dp->psr.psr2_enabled) {
		psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
1194
		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
1196
		psr_status = EDP_PSR_STATUS(intel_dp->psr.transcoder);
1197
		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
	}
1199 1200

	/* Wait till PSR is idle */
1201 1202
	if (intel_de_wait_for_clear(dev_priv, psr_status,
				    psr_status_mask, 2000))
1203
		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219
}

static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&intel_dp->psr.lock);

	if (!intel_dp->psr.enabled)
		return;

	drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
		    intel_dp->psr.psr2_enabled ? "2" : "1");

	intel_psr_exit(intel_dp);
	intel_psr_wait_exit_locked(intel_dp);
1220

1221
	/* WA 1408330847 */
1222
	if (intel_dp->psr.psr2_sel_fetch_enabled &&
1223
	    (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0) ||
1224 1225 1226 1227
	     IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0)))
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
			     DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);

1228 1229 1230
	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

1231
	if (intel_dp->psr.psr2_enabled)
1232 1233
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);

1234
	intel_dp->psr.enabled = false;
1235 1236
}

1237 1238 1239
/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
1240
 * @old_crtc_state: old CRTC state
1241 1242 1243
 *
 * This function needs to be called before disabling pipe.
 */
1244 1245
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
1246
{
1247
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1248

1249
	if (!old_crtc_state->has_psr)
1250 1251
		return;

1252
	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1253 1254
		return;

1255
	mutex_lock(&intel_dp->psr.lock);
1256

1257
	intel_psr_disable_locked(intel_dp);
1258

1259 1260 1261
	mutex_unlock(&intel_dp->psr.lock);
	cancel_work_sync(&intel_dp->psr.work);
	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
}

1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318
/**
 * intel_psr_pause - Pause PSR
 * @intel_dp: Intel DP
 *
 * This function needs to be called after enabling PSR.
 */
void intel_psr_pause(struct intel_dp *intel_dp)
{
	struct intel_psr *psr = &intel_dp->psr;

	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled) {
		mutex_unlock(&psr->lock);
		return;
	}

	intel_psr_exit(intel_dp);
	intel_psr_wait_exit_locked(intel_dp);
	psr->paused = true;

	mutex_unlock(&psr->lock);

	cancel_work_sync(&psr->work);
	cancel_delayed_work_sync(&psr->dc3co_work);
}

/**
 * intel_psr_resume - Resume PSR
 * @intel_dp: Intel DP
 *
 * This function needs to be called after pausing PSR.
 */
void intel_psr_resume(struct intel_dp *intel_dp)
{
	struct intel_psr *psr = &intel_dp->psr;

	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->paused)
		goto unlock;

	psr->paused = false;
	intel_psr_activate(intel_dp);

unlock:
	mutex_unlock(&psr->lock);
}

1319
static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1320
{
1321 1322
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

1323
	if (DISPLAY_VER(dev_priv) >= 9)
1324 1325 1326 1327 1328 1329 1330 1331 1332
		/*
		 * Display WA #0884: skl+
		 * This documented WA for bxt can be safely applied
		 * broadly so we can force HW tracking to exit PSR
		 * instead of disabling and re-enabling.
		 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
		 * but it makes more sense write to the current active
		 * pipe.
		 */
1333
		intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
1334 1335 1336 1337 1338
	else
		/*
		 * A write to CURSURFLIVE do not cause HW tracking to exit PSR
		 * on older gens so doing the manual exit instead.
		 */
1339
		intel_psr_exit(intel_dp);
1340 1341
}

1342 1343 1344 1345 1346 1347 1348
void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
					const struct intel_crtc_state *crtc_state,
					const struct intel_plane_state *plane_state,
					int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
1349
	const struct drm_rect *clip;
1350 1351
	u32 val, offset;
	int ret, x, y;
1352 1353 1354 1355 1356 1357 1358 1359 1360 1361

	if (!crtc_state->enable_psr2_sel_fetch)
		return;

	val = plane_state ? plane_state->ctl : 0;
	val &= plane->id == PLANE_CURSOR ? val : PLANE_SEL_FETCH_CTL_ENABLE;
	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), val);
	if (!val || plane->id == PLANE_CURSOR)
		return;
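	/*
	 * Program the plane's selective fetch window: position, main surface
	 * offset and size, derived from the damaged area that
	 * intel_psr2_sel_fetch_update() stored in psr2_sel_fetch_area.
	 */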

1362 1363 1364 1365
	clip = &plane_state->psr2_sel_fetch_area;

	val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
	val |= plane_state->uapi.dst.x1;
1366 1367
	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);

1368 1369 1370 1371 1372 1373 1374 1375
	/* TODO: consider auxiliary surfaces */
	x = plane_state->uapi.src.x1 >> 16;
	y = (plane_state->uapi.src.y1 >> 16) + clip->y1;
	ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
	if (ret)
		drm_warn_once(&dev_priv->drm, "skl_calc_main_surface_offset() returned %i\n",
			      ret);
	val = y << 16 | x;
1376 1377 1378 1379
	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
			  val);

	/* Sizes are 0 based */
1380
	val = (drm_rect_height(clip) - 1) << 16;
1381 1382 1383 1384
	val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
}

1385 1386
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
1387
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1388 1389 1390 1391 1392

	if (!HAS_PSR2_SEL_FETCH(dev_priv) ||
	    !crtc_state->enable_psr2_sel_fetch)
		return;

1393 1394
	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder),
		       crtc_state->psr2_man_track_ctl);
1395 1396
}

1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
				  struct drm_rect *clip, bool full_update)
{
	u32 val = PSR2_MAN_TRK_CTL_ENABLE;

	if (full_update) {
		val |= PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
		goto exit;
	}

	if (clip->y1 == -1)
		goto exit;

1410 1411
	drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);

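	/*
	 * The SU region start/end addresses are programmed in 4-line block
	 * units (hence the division by 4), which is why the clip must already
	 * be 4-line aligned; the + 1 appears to account for the one-based
	 * block addressing of the register.
	 */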
1412 1413
	val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
	val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
1414
	val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436
exit:
	crtc_state->psr2_man_track_ctl = val;
}

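/*
 * Merge a damaged area into the running damage union; y1 == -1 marks an
 * empty area.
 */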
static void clip_area_update(struct drm_rect *overlap_damage_area,
			     struct drm_rect *damage_area)
{
	if (overlap_damage_area->y1 == -1) {
		overlap_damage_area->y1 = damage_area->y1;
		overlap_damage_area->y2 = damage_area->y2;
		return;
	}

	if (damage_area->y1 < overlap_damage_area->y1)
		overlap_damage_area->y1 = damage_area->y1;

	if (damage_area->y2 > overlap_damage_area->y2)
		overlap_damage_area->y2 = damage_area->y2;
}

int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
1437 1438
{
	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
1439
	struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
1440 1441 1442 1443
	struct intel_plane_state *new_plane_state, *old_plane_state;
	struct intel_plane *plane;
	bool full_update = false;
	int i, ret;
1444 1445

	if (!crtc_state->enable_psr2_sel_fetch)
1446 1447 1448 1449 1450 1451
		return 0;

	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
	if (ret)
		return ret;

1452 1453 1454 1455 1456 1457
	/*
	 * Calculate minimal selective fetch area of each plane and calculate
	 * the pipe damaged area.
	 * In the next loop the plane selective fetch area will actually be set
	 * using whole pipe damaged area.
	 */
1458 1459
	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
1460 1461 1462
		struct drm_rect src, damaged_area = { .y1 = -1 };
		struct drm_mode_rect *damaged_clips;
		u32 num_clips, j;
1463 1464 1465 1466

		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
			continue;

1467 1468 1469 1470
		if (!new_plane_state->uapi.visible &&
		    !old_plane_state->uapi.visible)
			continue;

1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481
		/*
		 * TODO: Not clear how to handle planes with negative position;
		 * also planes are not updated if they have a negative X
		 * position, so for now do a full update in these cases.
		 */
		if (new_plane_state->uapi.dst.y1 < 0 ||
		    new_plane_state->uapi.dst.x1 < 0) {
			full_update = true;
			break;
		}

1482
		num_clips = drm_plane_get_damage_clips_count(&new_plane_state->uapi);
1483 1484

		/*
		 * If the visibility changed or the plane moved, mark the whole
		 * plane area as damaged as it needs to be completely redrawn
		 * in both the new and old positions.
		 */
1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541
		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
		    !drm_rect_equals(&new_plane_state->uapi.dst,
				     &old_plane_state->uapi.dst)) {
			if (old_plane_state->uapi.visible) {
				damaged_area.y1 = old_plane_state->uapi.dst.y1;
				damaged_area.y2 = old_plane_state->uapi.dst.y2;
				clip_area_update(&pipe_clip, &damaged_area);
			}

			if (new_plane_state->uapi.visible) {
				damaged_area.y1 = new_plane_state->uapi.dst.y1;
				damaged_area.y2 = new_plane_state->uapi.dst.y2;
				clip_area_update(&pipe_clip, &damaged_area);
			}
			continue;
		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha ||
			   (!num_clips &&
			    new_plane_state->uapi.fb != old_plane_state->uapi.fb)) {
			/*
			 * If the plane doesn't have damaged areas but the
			 * framebuffer changed or alpha changed, mark the whole
			 * plane area as damaged.
			 */
			damaged_area.y1 = new_plane_state->uapi.dst.y1;
			damaged_area.y2 = new_plane_state->uapi.dst.y2;
			clip_area_update(&pipe_clip, &damaged_area);
			continue;
		}

		drm_rect_fp_to_int(&src, &new_plane_state->uapi.src);
		damaged_clips = drm_plane_get_damage_clips(&new_plane_state->uapi);

		for (j = 0; j < num_clips; j++) {
			struct drm_rect clip;

			clip.x1 = damaged_clips[j].x1;
			clip.y1 = damaged_clips[j].y1;
			clip.x2 = damaged_clips[j].x2;
			clip.y2 = damaged_clips[j].y2;
			if (drm_rect_intersect(&clip, &src))
				clip_area_update(&damaged_area, &clip);
		}

		if (damaged_area.y1 == -1)
			continue;

		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
		clip_area_update(&pipe_clip, &damaged_area);
	}

	if (full_update)
		goto skip_sel_fetch_set_loop;
1542

1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566
	/* It must be aligned to 4 lines */
	pipe_clip.y1 -= pipe_clip.y1 % 4;
	if (pipe_clip.y2 % 4)
		pipe_clip.y2 = ((pipe_clip.y2 / 4) + 1) * 4;

	/*
	 * Now that we have the pipe damaged area check if it intersect with
	 * every plane, if it does set the plane selective fetch area.
	 */
	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		struct drm_rect *sel_fetch_area, inter;

		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
		    !new_plane_state->uapi.visible)
			continue;

		inter = pipe_clip;
		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
			continue;

		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
1567
	}
1568

1569
skip_sel_fetch_set_loop:
1570 1571
	psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
	return 0;
1572 1573
}

1574 1575 1576 1577
/**
 * intel_psr_update - Update PSR state
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
1578
 * @conn_state: new CONNECTOR state
1579 1580 1581 1582 1583 1584
 *
 * This functions will update PSR states, disabling, enabling or switching PSR
 * version when executing fastsets. For full modeset, intel_psr_disable() and
 * intel_psr_enable() should be called instead.
 */
void intel_psr_update(struct intel_dp *intel_dp,
1585 1586
		      const struct intel_crtc_state *crtc_state,
		      const struct drm_connector_state *conn_state)
1587 1588
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1589
	struct intel_psr *psr = &intel_dp->psr;
1590 1591
	bool enable, psr2_enable;

1592
	if (!CAN_PSR(intel_dp))
1593 1594
		return;

1595
	mutex_lock(&intel_dp->psr.lock);
1596

1597 1598
	enable = crtc_state->has_psr;
	psr2_enable = crtc_state->has_psr2;
1599

1600 1601
	if (enable == psr->enabled && psr2_enable == psr->psr2_enabled &&
	    crtc_state->enable_psr2_sel_fetch == psr->psr2_sel_fetch_enabled) {
1602 1603
		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
		if (crtc_state->crc_enabled && psr->enabled)
1604
			psr_force_hw_tracking_exit(intel_dp);
1605
		else if (DISPLAY_VER(dev_priv) < 9 && psr->enabled) {
1606 1607 1608 1609
			/*
			 * Activate PSR again after a force exit when enabling
			 * CRC in older gens
			 */
1610 1611 1612
			if (!intel_dp->psr.active &&
			    !intel_dp->psr.busy_frontbuffer_bits)
				schedule_work(&intel_dp->psr.work);
1613
		}
1614

1615
		goto unlock;
1616
	}
1617

1618 1619
	if (psr->enabled)
		intel_psr_disable_locked(intel_dp);
1620

1621
	if (enable)
1622
		intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
1623 1624

unlock:
1625
	mutex_unlock(&intel_dp->psr.lock);
1626 1627
}

/**
 * psr_wait_for_idle - wait for PSR1 to idle
 * @intel_dp: Intel DP
 * @out_value: PSR status in case of failure
 *
 * Returns: 0 on success or -ETIMEDOUT if PSR status does not idle.
 *
 */
static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/*
	 * From bspec: Panel Self Refresh (BDW+)
	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
	 * defensive enough to cover everything.
	 */
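	/*
	 * Illustrative arithmetic: at a 60 Hz refresh rate the bound above is
	 * roughly 16.7 ms + 6 ms + 1.5 ms ~= 24.2 ms, comfortably within the
	 * 50 ms timeout used below.
	 */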
	return __intel_wait_for_register(&dev_priv->uncore,
					 EDP_PSR_STATUS(intel_dp->psr.transcoder),
					 EDP_PSR_STATUS_STATE_MASK,
					 EDP_PSR_STATUS_STATE_IDLE, 2, 50,
					 out_value);
}

/**
 * intel_psr_wait_for_idle - wait for PSR1 to idle
 * @new_crtc_state: new CRTC state
 *
 * This function is expected to be called from pipe_update_start() where it is
 * not expected to race with PSR enable or disable.
 */
void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
	struct intel_encoder *encoder;

	if (!new_crtc_state->has_psr)
		return;

	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
					     new_crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		u32 psr_status;

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled || intel_dp->psr.psr2_enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		/* when only PSR1 is enabled, wait for it to idle */
		if (psr_wait_for_idle(intel_dp, &psr_status))
			drm_err(&dev_priv->drm,
				"PSR idle timed out 0x%x, atomic update may fail\n",
				psr_status);
		mutex_unlock(&intel_dp->psr.lock);
	}
}

static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t reg;
	u32 mask;
	int err;

	if (!intel_dp->psr.enabled)
		return false;

	if (intel_dp->psr.psr2_enabled) {
		reg = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = EDP_PSR_STATUS(intel_dp->psr.transcoder);
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	mutex_unlock(&intel_dp->psr.lock);

	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
	if (err)
		drm_err(&dev_priv->drm,
			"Timed out waiting for PSR Idle for re-enable\n");

	/* After the unlocked wait, verify that PSR is still wanted! */
	mutex_lock(&intel_dp->psr.lock);
	return err == 0 && intel_dp->psr.enabled;
}

static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct drm_connector *conn;
	int err = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
	state->acquire_ctx = &ctx;

retry:
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct drm_crtc_state *crtc_state;

		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			err = PTR_ERR(conn_state);
			break;
		}

		if (!conn_state->crtc)
			continue;

		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			break;
		}

		/* Mark mode as changed to trigger a pipe->update() */
		crtc_state->mode_changed = true;
	}
	drm_connector_list_iter_end(&conn_iter);

	if (err == 0)
		err = drm_atomic_commit(state);

	if (err == -EDEADLK) {
		drm_atomic_state_clear(state);
		err = drm_modeset_backoff(&ctx);
		if (!err)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_atomic_state_put(state);

	return err;
}
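
/*
 * Illustrative examples of values accepted by intel_psr_debug_set() below
 * (assuming the standard I915_PSR_DEBUG_* flag definitions): e.g.
 * I915_PSR_DEBUG_FORCE_PSR1 to pin the mode to PSR1, or
 * I915_PSR_DEBUG_DEFAULT | I915_PSR_DEBUG_IRQ to keep the default mode
 * while also enabling the PSR debug interrupts.
 */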

int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
	u32 old_mode;
	int ret;

	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
	if (ret)
		return ret;

	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
	intel_dp->psr.debug = val;

	/*
	 * Do it right away if it's already enabled, otherwise it will be done
	 * when enabling the source.
	 */
	if (intel_dp->psr.enabled)
		psr_irq_control(intel_dp);

	mutex_unlock(&intel_dp->psr.lock);

	if (old_mode != mode)
		ret = intel_psr_fastset_force(dev_priv);

	return ret;
}

static void intel_psr_handle_irq(struct intel_dp *intel_dp)
{
	struct intel_psr *psr = &intel_dp->psr;

	intel_psr_disable_locked(intel_dp);
	psr->sink_not_reliable = true;
	/* let's make sure that the sink is awake */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static void intel_psr_work(struct work_struct *work)
{
	struct intel_dp *intel_dp =
		container_of(work, typeof(*intel_dp), psr.work);

	mutex_lock(&intel_dp->psr.lock);

	if (!intel_dp->psr.enabled)
		goto unlock;

	if (READ_ONCE(intel_dp->psr.irq_aux_error))
		intel_psr_handle_irq(intel_dp);

	/*
	 * We have to make sure PSR is ready for re-enable,
	 * otherwise it stays disabled until the next full enable/disable
	 * cycle. PSR might take some time to get fully disabled and be
	 * ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(intel_dp))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
		goto unlock;

	intel_psr_activate(intel_dp);
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_encoder *encoder;

	if (origin == ORIGIN_FLIP)
		return;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		pipe_frontbuffer_bits &=
			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;

		if (pipe_frontbuffer_bits)
			intel_psr_exit(intel_dp);

		mutex_unlock(&intel_dp->psr.lock);
	}
}
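
/*
 * Note (illustrative summary): the expected frontbuffer tracking sequence is
 * roughly: rendering starts -> intel_psr_invalidate() forces a PSR exit and
 * records the dirty bits; rendering completes -> intel_psr_flush() clears
 * those bits and schedules psr.work to re-activate PSR once the hardware has
 * gone idle again.
 */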

/*
 * When we completely rely on PSR2 S/W tracking in the future,
 * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
 * events as well, therefore tgl_dc3co_flush() will need to be changed
 * accordingly.
 */
static void
tgl_dc3co_flush(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
		enum fb_op_origin origin)
{
	mutex_lock(&intel_dp->psr.lock);

	if (!intel_dp->psr.dc3co_exitline)
		goto unlock;

	if (!intel_dp->psr.psr2_enabled || !intel_dp->psr.active)
		goto unlock;

	/*
	 * Every flip-originated frontbuffer flush pushes the delayed work
	 * further out; when the delayed work finally gets to run, the
	 * display has been idle for the whole delay.
	 */
	if (!(frontbuffer_bits &
	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
		goto unlock;

	tgl_psr2_enable_dc3co(intel_dp);
	mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
			 intel_dp->psr.dc3co_exit_delay);

unlock:
	mutex_unlock(&intel_dp->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (origin == ORIGIN_FLIP) {
			tgl_dc3co_flush(intel_dp, frontbuffer_bits, origin);
			continue;
		}

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		pipe_frontbuffer_bits &=
			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;

		/*
		 * If the PSR is paused by an explicit intel_psr_paused() call,
		 * we have to ensure that the PSR is not activated until
		 * intel_psr_resume() is called.
		 */
		if (intel_dp->psr.paused) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		/* By definition flush = invalidate + flush */
		if (pipe_frontbuffer_bits)
			psr_force_hw_tracking_exit(intel_dp);

		if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
			schedule_work(&intel_dp->psr.work);
		mutex_unlock(&intel_dp->psr.lock);
	}
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @intel_dp: Intel DP
 *
 * This function is called after the connector has been initialized
 * (connector initialization handles the connector capabilities) and it
 * sets up the basic PSR state for each DP encoder.
 */
void intel_psr_init(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!HAS_PSR(dev_priv))
		return;

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms have an instance of PSR registers per transcoder,
	 * but BDW, GEN9 and GEN11 are not validated by the HW team on any
	 * transcoder other than the eDP one.
	 * For now only one instance of PSR is supported on BDW, GEN9 and
	 * GEN11, so let's keep it hardcoded to PORT_A there.
	 * GEN12, however, supports an instance of PSR registers per
	 * transcoder.
	 */
	if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Port not supported\n");
		return;
	}

	intel_dp->psr.source_support = true;

	if (IS_HASWELL(dev_priv))
		/*
		 * HSW doesn't have its PSR registers in the same space as the
		 * transcoder, so set this to a value that, when subtracted
		 * from a register in transcoder space, results in the right
		 * offset for HSW.
		 */
		dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;

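	/*
	 * Note (illustrative): enable_psr == -1 means "use the per-platform
	 * default", which here resolves to keeping PSR enabled only on
	 * display version 9+ when the VBT advertises PSR support.
	 */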
	if (dev_priv->params.enable_psr == -1)
		if (DISPLAY_VER(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
			dev_priv->params.enable_psr = 0;

	/* Set link_standby vs. link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		intel_dp->psr.link_standby = false;
	else if (DISPLAY_VER(dev_priv) < 12)
		/* For newer platforms up to TGL, let's respect the VBT again */
		intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link;

	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
	mutex_init(&intel_dp->psr.lock);
}

static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
					   u8 *status, u8 *error_status)
{
	struct drm_dp_aux *aux = &intel_dp->aux;
	int ret;

	ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
	if (ret != 1)
		return ret;

	ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
	if (ret != 1)
		return ret;

	*status = *status & DP_PSR_SINK_STATE_MASK;

	return 0;
}

static void psr_alpm_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_dp_aux *aux = &intel_dp->aux;
	struct intel_psr *psr = &intel_dp->psr;
	u8 val;
	int r;

	if (!psr->psr2_enabled)
		return;

	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
	if (r != 1) {
		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
		return;
	}

	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "ALPM lock timeout error, disabling PSR\n");

		/* Clearing error */
		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
	}
}

static void psr_capability_changed_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	u8 val;
	int r;

	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
	if (r != 1) {
		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
		return;
	}

	if (val & DP_PSR_CAPS_CHANGE) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "Sink PSR capability changed, disabling PSR\n");

		/* Clearing it */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
	}
}

void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	u8 status, error_status;
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled)
		goto exit;

	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
		drm_err(&dev_priv->drm,
			"Error reading PSR status or error status\n");
		goto exit;
	}

	if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}

	if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR sink internal error, disabling PSR\n");
	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR RFB storage error, disabling PSR\n");
	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (error_status & DP_PSR_LINK_CRC_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR Link CRC error, disabling PSR\n");

	if (error_status & ~errors)
		drm_err(&dev_priv->drm,
			"PSR_ERROR_STATUS unhandled errors %x\n",
			error_status & ~errors);
	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);

	psr_alpm_check(intel_dp);
	psr_capability_changed_check(intel_dp);

exit:
	mutex_unlock(&psr->lock);
}

bool intel_psr_enabled(struct intel_dp *intel_dp)
{
	bool ret;

	if (!CAN_PSR(intel_dp))
		return false;

	mutex_lock(&intel_dp->psr.lock);
	ret = intel_dp->psr.enabled;
	mutex_unlock(&intel_dp->psr.lock);

	return ret;
}