intel_psr.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>

#include "display/intel_dp.h"

#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_hdmi.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "skl_universal_plane.h"

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP1.3. The PSR feature allows the display to go to lower
 * standby states when the system is idle but the display is on, as it
 * eliminates display refresh requests to DDR memory completely as long as the
 * frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 *
 * DC3CO (DC3 clock off)
 *
 * On top of PSR2, GEN12 adds an intermediate power savings state that turns
 * the clock off automatically during the PSR2 idle state.
 * The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep
 * entry/exit allows the HW to enter a low-power state even when page flipping
 * periodically (for instance a 30fps video playback scenario).
 *
 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was in
 * it), so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run
 * after 6 frames; if no other flip occurs and that function is executed, DC3CO
 * is disabled and PSR2 is configured to enter deep sleep, resetting again in
 * case of another flip.
 * Front buffer modifications do not trigger DC3CO activation on purpose as it
 * would bring a lot of complexity and most modern systems will only use page
 * flips.
 */

84
static bool psr_global_enabled(struct intel_dp *intel_dp)
85
{
86 87 88
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
89
	case I915_PSR_DEBUG_DEFAULT:
90
		return i915->params.enable_psr;
91 92 93 94 95 96 97
	case I915_PSR_DEBUG_DISABLE:
		return false;
	default:
		return true;
	}
}

98
static bool psr2_global_enabled(struct intel_dp *intel_dp)
99
{
100
	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
101
	case I915_PSR_DEBUG_DISABLE:
102 103 104
	case I915_PSR_DEBUG_FORCE_PSR1:
		return false;
	default:
105
		return true;
106 107 108
	}
}

109
static void psr_irq_control(struct intel_dp *intel_dp)
110
{
111
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
112 113
	enum transcoder trans_shift;
	i915_reg_t imr_reg;
114
	u32 mask, val;
115

116 117 118 119 120 121 122
	/*
	 * gen12+ has registers relative to transcoder and one per transcoder
	 * using the same bit definition: handle it as TRANSCODER_EDP to force
	 * 0 shift in bit definition
	 */
	if (INTEL_GEN(dev_priv) >= 12) {
		trans_shift = 0;
123
		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
124
	} else {
125
		trans_shift = intel_dp->psr.transcoder;
126 127 128 129
		imr_reg = EDP_PSR_IMR;
	}

	mask = EDP_PSR_ERROR(trans_shift);
130
	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
131 132
		mask |= EDP_PSR_POST_EXIT(trans_shift) |
			EDP_PSR_PRE_ENTRY(trans_shift);
133 134

	/* Warning: it is masking/setting reserved bits too */
135
	val = intel_de_read(dev_priv, imr_reg);
136
	val &= ~EDP_PSR_TRANS_MASK(trans_shift);
137
	val |= ~mask;
138
	intel_de_write(dev_priv, imr_reg, val);
139 140
}

141 142
static void psr_event_print(struct drm_i915_private *i915,
			    u32 val, bool psr2_enabled)
143
{
144
	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
145
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
146
		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
147
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
148
		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
149
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
150
		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
151
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
152
		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
153
	if (val & PSR_EVENT_GRAPHICS_RESET)
154
		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
155
	if (val & PSR_EVENT_PCH_INTERRUPT)
156
		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
157
	if (val & PSR_EVENT_MEMORY_UP)
158
		drm_dbg_kms(&i915->drm, "\tMemory up\n");
159
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
160
		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
161
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
162
		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
163
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
164
		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
165
	if (val & PSR_EVENT_REGISTER_UPDATE)
166
		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
167
	if (val & PSR_EVENT_HDCP_ENABLE)
168
		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
169
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
170
		drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
171
	if (val & PSR_EVENT_VBI_ENABLE)
172
		drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
173
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
174
		drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
175
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
176
		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
177 178
}

179
void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
180
{
181 182 183
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	ktime_t time_ns =  ktime_get();
184 185
	enum transcoder trans_shift;
	i915_reg_t imr_reg;
186

187 188
	if (INTEL_GEN(dev_priv) >= 12) {
		trans_shift = 0;
189
		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
190
	} else {
191
		trans_shift = intel_dp->psr.transcoder;
192 193 194 195
		imr_reg = EDP_PSR_IMR;
	}

	if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
196
		intel_dp->psr.last_entry_attempt = time_ns;
197 198 199
		drm_dbg_kms(&dev_priv->drm,
			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
			    transcoder_name(cpu_transcoder));
200
	}
201

202
	if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
203
		intel_dp->psr.last_exit = time_ns;
204 205 206
		drm_dbg_kms(&dev_priv->drm,
			    "[transcoder %s] PSR exit completed\n",
			    transcoder_name(cpu_transcoder));
207

208
		if (INTEL_GEN(dev_priv) >= 9) {
209 210
			u32 val = intel_de_read(dev_priv,
						PSR_EVENT(cpu_transcoder));
211
			bool psr2_enabled = intel_dp->psr.psr2_enabled;
212

213 214
			intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
				       val);
215
			psr_event_print(dev_priv, val, psr2_enabled);
216
		}
217
	}
218

219
	if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
220
		u32 val;
221

222
		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
223
			 transcoder_name(cpu_transcoder));
224

225
		intel_dp->psr.irq_aux_error = true;
226

		/*
		 * If this interrupt is not masked it will keep firing so fast
		 * that it prevents the scheduled work from running.
		 * Also after a PSR error, we don't want to arm PSR again so we
		 * don't care about unmasking the interrupt or unsetting
		 * irq_aux_error.
		 */
235
		val = intel_de_read(dev_priv, imr_reg);
236
		val |= EDP_PSR_ERROR(trans_shift);
237
		intel_de_write(dev_priv, imr_reg, val);
238

239
		schedule_work(&intel_dp->psr.work);
240
	}
241 242
}

243 244
static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
245
	u8 alpm_caps = 0;
246 247 248 249 250 251 252

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;
	return alpm_caps & DP_ALPM_CAP;
}

253 254
static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
255
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
256
	u8 val = 8; /* assume the worst if we can't read the value */
257 258 259 260 261

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
262 263
		drm_dbg_kms(&i915->drm,
			    "Unable to get sink synchronization latency, assuming 8 frames\n");
264 265 266
	return val;
}

267 268
static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
{
269
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
270 271 272 273 274 275 276 277 278 279 280 281
	u16 val;
	ssize_t r;

	/*
	 * Returning the default X granularity if granularity not required or
	 * if DPCD read fails
	 */
	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
		return 4;

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
	if (r != 2)
282 283
		drm_dbg_kms(&i915->drm,
			    "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
284 285 286 287 288 289 290 291 292 293 294

	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 */
	if (r != 2 || val == 0)
		val = 4;

	return val;
}

295 296 297 298 299 300 301 302
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

303 304
	if (!intel_dp->psr_dpcd[0])
		return;
305 306
	drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
		    intel_dp->psr_dpcd[0]);
307

308
	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
309 310
		drm_dbg_kms(&dev_priv->drm,
			    "PSR support not currently available for this panel\n");
311 312 313
		return;
	}

314
	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
315 316
		drm_dbg_kms(&dev_priv->drm,
			    "Panel lacks power state control, PSR cannot be enabled\n");
317 318
		return;
	}
319

320 321
	intel_dp->psr.sink_support = true;
	intel_dp->psr.sink_sync_latency =
322
		intel_dp_get_sink_sync_latency(intel_dp);
323 324

	if (INTEL_GEN(dev_priv) >= 9 &&
325
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
326 327 328 329
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way the panel is capable of doing a selective
		 * update without an aux frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h panels
		 * without the Y-coordinate requirement we would need to
		 * enable GTC first.
		 */
341
		intel_dp->psr.sink_psr2_support = y_req && alpm;
342
		drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
343
			    intel_dp->psr.sink_psr2_support ? "" : "not ");
344

345 346
		if (intel_dp->psr.sink_psr2_support) {
			intel_dp->psr.colorimetry_support =
347
				intel_dp_get_colorimetry_status(intel_dp);
348
			intel_dp->psr.su_x_granularity =
349
				intel_dp_get_su_x_granulartiy(intel_dp);
350 351 352 353
		}
	}
}

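/*
 * Pre-program the AUX channel with the DPCD write (DP_SET_POWER -> D0) that
 * the PSR hardware sends on its own when exiting self-refresh; only HSW/BDW
 * need this, later platforms use hardcoded values.
 */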
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
356
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
357 358
	u32 aux_clock_divider, aux_ctl;
	int i;
359
	static const u8 aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
366 367 368 369
	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
			   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
			   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
			   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
372
	for (i = 0; i < sizeof(aux_msg); i += 4)
373
		intel_de_write(dev_priv,
374
			       EDP_PSR_AUX_DATA(intel_dp->psr.transcoder, i >> 2),
375
			       intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
376

377 378 379
	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
380
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
381
					     aux_clock_divider);
382 383 384

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= psr_aux_mask;
385
	intel_de_write(dev_priv, EDP_PSR_AUX_CTL(intel_dp->psr.transcoder),
386
		       aux_ctl);
387 388
}

389
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
390
{
391
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
392
	u8 dpcd_val = DP_PSR_ENABLE;
393

394
	/* Enable ALPM at sink for psr2 */
395
	if (intel_dp->psr.psr2_enabled) {
396
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
397 398 399
				   DP_ALPM_ENABLE |
				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);

400
		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
401
	} else {
402
		if (intel_dp->psr.link_standby)
403
			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
404 405 406

		if (INTEL_GEN(dev_priv) >= 8)
			dpcd_val |= DP_PSR_CRC_VERIFICATION;
407 408
	}

409
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
410

411
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

414
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
416
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
417
	u32 val = 0;
418

419 420 421
	if (INTEL_GEN(dev_priv) >= 11)
		val |= EDP_PSR_TP4_TIME_0US;

422
	if (dev_priv->params.psr_safest_params) {
423 424 425 426 427
		val |= EDP_PSR_TP1_TIME_2500us;
		val |= EDP_PSR_TP2_TP3_TIME_2500us;
		goto check_tp3_sel;
	}

428
	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
429
		val |= EDP_PSR_TP1_TIME_0us;
430
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
431
		val |= EDP_PSR_TP1_TIME_100us;
432 433
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
434
	else
435
		val |= EDP_PSR_TP1_TIME_2500us;
436

437
	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
438
		val |= EDP_PSR_TP2_TP3_TIME_0us;
439
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
440
		val |= EDP_PSR_TP2_TP3_TIME_100us;
441 442
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
443
	else
444
		val |= EDP_PSR_TP2_TP3_TIME_2500us;
445

446
check_tp3_sel:
447 448 449 450 451 452
	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

453 454 455
	return val;
}

456
static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
457 458
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
459
	int idle_frames;
460 461 462 463

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
464
	idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
465
	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
466

467
	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
468 469 470 471 472 473 474 475 476 477 478 479
		idle_frames = 0xf;

	return idle_frames;
}

static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT;
480 481 482 483 484

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

485
	if (intel_dp->psr.link_standby)
486 487 488 489
		val |= EDP_PSR_LINK_STANDBY;

	val |= intel_psr1_get_tp_time(intel_dp);

490 491 492
	if (INTEL_GEN(dev_priv) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

493
	val |= (intel_de_read(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder)) &
494
		EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
495
	intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), val);
496
}
497

498
static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
499
{
500
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
501
	u32 val = 0;
502

503
	if (dev_priv->params.psr_safest_params)
504
		return EDP_PSR2_TP2_TIME_2500us;
505

506 507
	if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
	    dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
508
		val |= EDP_PSR2_TP2_TIME_50us;
509
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
510
		val |= EDP_PSR2_TP2_TIME_100us;
511
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
512
		val |= EDP_PSR2_TP2_TIME_500us;
513
	else
514
		val |= EDP_PSR2_TP2_TIME_2500us;
515

516 517 518 519 520 521 522 523 524 525 526 527 528 529
	return val;
}

static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val;

	val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;

	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		val |= EDP_Y_COORDINATE_ENABLE;

530
	val |= EDP_PSR2_FRAME_BEFORE_SU(intel_dp->psr.sink_sync_latency + 1);
531 532
	val |= intel_psr2_get_tp_time(intel_dp);

	if (INTEL_GEN(dev_priv) >= 12) {
		/*
		 * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default
		 * values from BSpec. In order to set an optimal power
		 * consumption, modes lower than 4k resolution need to decrease
		 * IO_BUFFER_WAKE and FAST_WAKE, and modes higher than 4K
		 * resolution need to increase IO_BUFFER_WAKE and FAST_WAKE.
		 */
		val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
		val |= TGL_EDP_PSR2_FAST_WAKE(7);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		val |= EDP_PSR2_IO_BUFFER_WAKE(7);
		val |= EDP_PSR2_FAST_WAKE(7);
	}

549
	if (intel_dp->psr.psr2_sel_fetch_enabled) {
550
		/* WA 1408330847 */
551
		if (IS_TGL_DISP_STEPPING(dev_priv, STEP_A0, STEP_A0) ||
552 553 554 555 556
		    IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0))
			intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
				     DIS_RAM_BYPASS_PSR2_MAN_TRACK,
				     DIS_RAM_BYPASS_PSR2_MAN_TRACK);

557
		intel_de_write(dev_priv,
558
			       PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
559
			       PSR2_MAN_TRK_CTL_ENABLE);
560
	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
561
		intel_de_write(dev_priv,
562
			       PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0);
563
	}
564

	/*
	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
	 * recommends keeping this bit unset while PSR2 is enabled.
	 */
	intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), 0);

	intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
}

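/*
 * PSR2 is not supported before gen9; on gen9-11 it is only available on the
 * eDP transcoder, and from gen12 onwards only on transcoder A.
 */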
static bool
transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
{
577 578 579
	if (INTEL_GEN(dev_priv) < 9)
		return false;
	else if (INTEL_GEN(dev_priv) >= 12)
580 581 582 583 584
		return trans == TRANSCODER_A;
	else
		return trans == TRANSCODER_EDP;
}

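/* Duration of one frame in microseconds for the given CRTC state, 0 if inactive. */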
static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
{
587
	if (!cstate || !cstate->hw.active)
588 589 590
		return 0;

	return DIV_ROUND_UP(1000 * 1000,
591
			    drm_mode_vrefresh(&cstate->hw.adjusted_mode));
592 593
}

594
static void psr2_program_idle_frames(struct intel_dp *intel_dp,
595 596
				     u32 idle_frames)
{
597
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
598 599 600
	u32 val;

	idle_frames <<=  EDP_PSR2_IDLE_FRAME_SHIFT;
601
	val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder));
602 603
	val &= ~EDP_PSR2_IDLE_FRAME_MASK;
	val |= idle_frames;
604
	intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
605 606
}

607
static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
608
{
609 610 611
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	psr2_program_idle_frames(intel_dp, 0);
612 613 614
	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

615
static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
616
{
617
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
618 619

	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
620
	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
621 622
}

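/*
 * Delayed work armed on each flip: once no new flip has re-armed it, disable
 * DC3CO and let PSR2 use deep sleep again.
 */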
static void tgl_dc3co_disable_work(struct work_struct *work)
624
{
625 626
	struct intel_dp *intel_dp =
		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
627

628
	mutex_lock(&intel_dp->psr.lock);
629
	/* If delayed work is pending, it is not idle */
630
	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
631 632
		goto unlock;

633
	tgl_psr2_disable_dc3co(intel_dp);
634
unlock:
635
	mutex_unlock(&intel_dp->psr.lock);
636 637
}

638
static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
639
{
640
	if (!intel_dp->psr.dc3co_enabled)
641 642
		return;

	cancel_delayed_work(&intel_dp->psr.dc3co_work);
	/* Before PSR2 exit disallow dc3co */
	tgl_psr2_disable_dc3co(intel_dp);
646 647
}

648 649 650 651 652 653 654 655 656
static void
tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *crtc_state)
{
	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 exit_scanlines;

	/*
	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch.
	 * TODO: when the issue is addressed, this restriction should be removed.
	 */
	if (crtc_state->enable_psr2_sel_fetch)
		return;

664 665 666 667 668 669 670 671 672 673 674 675 676 677 678
	if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO))
		return;

	/* B.Specs:49196 DC3CO only works with pipeA and DDIA.*/
	if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A ||
	    dig_port->base.port != PORT_A)
		return;

	/*
	 * DC3CO Exit time 200us B.Spec 49196
	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
	 */
	exit_scanlines =
		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;

679
	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
680 681 682 683 684
		return;

	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
}

685 686 687 688 689 690 691 692 693
static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
					      struct intel_crtc_state *crtc_state)
{
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;

694 695
	if (!dev_priv->params.enable_psr2_sel_fetch &&
	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 sel fetch not enabled, disabled by parameter\n");
		return false;
	}

	if (crtc_state->uapi.async_flip) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 sel fetch not enabled, async flip enabled\n");
		return false;
	}

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane_state->uapi.rotation != DRM_MODE_ROTATE_0) {
			drm_dbg_kms(&dev_priv->drm,
				    "PSR2 sel fetch not enabled, plane rotated\n");
			return false;
		}
	}

	return crtc_state->enable_psr2_sel_fetch = true;
}

718 719 720
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
721
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
722 723
	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
724
	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
725

726
	if (!intel_dp->psr.sink_psr2_support)
727 728
		return false;

729 730 731 732 733 734
	/* JSL and EHL only support eDP 1.3 */
	if (IS_JSL_EHL(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
		return false;
	}

735
	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
736 737 738
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not supported in transcoder %s\n",
			    transcoder_name(crtc_state->cpu_transcoder));
739 740 741
		return false;
	}

742
	if (!psr2_global_enabled(intel_dp)) {
743 744 745 746
		drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
		return false;
	}

747 748 749 750 751
	/*
	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
	 * resolution requires DSC to be enabled, priority is given to DSC
	 * over PSR2.
	 */
752
	if (crtc_state->dsc.compression_enable) {
753 754
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 cannot be enabled since DSC is enabled\n");
755 756 757
		return false;
	}

758 759 760 761 762 763
	if (crtc_state->crc_enabled) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
		return false;
	}

764 765 766
	if (INTEL_GEN(dev_priv) >= 12) {
		psr_max_h = 5120;
		psr_max_v = 3200;
767
		max_bpp = 30;
768
	} else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
769 770
		psr_max_h = 4096;
		psr_max_v = 2304;
771
		max_bpp = 24;
772
	} else if (IS_GEN(dev_priv, 9)) {
773 774
		psr_max_h = 3640;
		psr_max_v = 2304;
775
		max_bpp = 24;
776 777
	}

778
	if (crtc_state->pipe_bpp > max_bpp) {
779 780 781
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
			    crtc_state->pipe_bpp, max_bpp);
782 783 784
		return false;
	}

	/*
	 * HW sends SU blocks of size four scan lines, which means the starting
	 * X coordinate and Y granularity requirements will always be met. We
	 * only need to validate that the SU block width is a multiple of
	 * the X granularity.
	 */
791
	if (crtc_hdisplay % intel_dp->psr.su_x_granularity) {
792 793
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
794
			    crtc_hdisplay, intel_dp->psr.su_x_granularity);
795 796 797
		return false;
	}

798 799 800 801 802 803 804
	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
		    !HAS_PSR_HW_TRACKING(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
			return false;
		}
805 806
	}

807 808
	if (!crtc_state->enable_psr2_sel_fetch &&
	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
809 810 811 812
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			    crtc_hdisplay, crtc_vdisplay,
			    psr_max_h, psr_max_v);
813 814 815
		return false;
	}

816
	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
817 818 819
	return true;
}

820 821
void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
823
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
824
	const struct drm_display_mode *adjusted_mode =
825
		&crtc_state->hw.adjusted_mode;
826
	int psr_setup_time;

	/*
	 * Current PSR panels don't work reliably with VRR enabled, so if
	 * VRR is enabled do not enable PSR.
	 */
	if (crtc_state->vrr.enable)
		return;

835
	if (!CAN_PSR(intel_dp))
836 837
		return;

838
	if (!psr_global_enabled(intel_dp)) {
839
		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
840
		return;
841 842
	}

843
	if (intel_dp->psr.sink_not_reliable) {
844 845
		drm_dbg_kms(&dev_priv->drm,
			    "PSR sink implementation is not reliable\n");
846 847 848
		return;
	}

849
	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
850 851
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Interlaced mode enabled\n");
852
		return;
	}

855 856
	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
857 858 859
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			    intel_dp->psr_dpcd[1]);
860
		return;
861 862 863 864
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
865 866 867
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: PSR setup time (%d us) too long\n",
			    psr_setup_time);
868 869 870 871
		return;
	}

	crtc_state->has_psr = true;
872
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
873
	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
}

876
static void intel_psr_activate(struct intel_dp *intel_dp)
{
878
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
879
	enum transcoder transcoder = intel_dp->psr.transcoder;

881
	if (transcoder_has_psr2(dev_priv, transcoder))
882
		drm_WARN_ON(&dev_priv->drm,
883
			    intel_de_read(dev_priv, EDP_PSR2_CTL(transcoder)) & EDP_PSR2_ENABLE);
884

885
	drm_WARN_ON(&dev_priv->drm,
886 887 888
		    intel_de_read(dev_priv, EDP_PSR_CTL(transcoder)) & EDP_PSR_ENABLE);
	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
	lockdep_assert_held(&intel_dp->psr.lock);

890
	/* psr1 and psr2 are mutually exclusive.*/
891
	if (intel_dp->psr.psr2_enabled)
892 893 894 895
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

896
	intel_dp->psr.active = true;
}

899 900
static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
901
{
902
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
903
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
904
	u32 mask;
905

	/* Only HSW and BDW have PSR AUX registers that need to be set up.
	 * SKL+ use hardcoded values for PSR AUX transactions.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_psr_setup_aux(intel_dp);

912
	if (intel_dp->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
913
					   !IS_GEMINILAKE(dev_priv))) {
914
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
915
		u32 chicken = intel_de_read(dev_priv, reg);
916

917 918
		chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
			   PSR2_ADD_VERTICAL_LINE_COUNT;
919
		intel_de_write(dev_priv, reg, chicken);
920
	}

	/*
	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD; also
	 * mask LPSP to avoid a dependency on other drivers that might block
	 * runtime_pm. Besides preventing other hw tracking issues, now we
	 * can rely on frontbuffer tracking.
	 */
928 929 930 931 932 933 934 935
	mask = EDP_PSR_DEBUG_MASK_MEMUP |
	       EDP_PSR_DEBUG_MASK_HPD |
	       EDP_PSR_DEBUG_MASK_LPSP |
	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;

	if (INTEL_GEN(dev_priv) < 11)
		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;

936
	intel_de_write(dev_priv, EDP_PSR_DEBUG(intel_dp->psr.transcoder),
937
		       mask);
938

939
	psr_irq_control(intel_dp);
940 941 942 943 944 945 946 947

	if (crtc_state->dc3co_exitline) {
		u32 val;

		/*
		 * TODO: if future platforms supports DC3CO in more than one
		 * transcoder, EXITLINE will need to be unset when disabling PSR
		 */
948
		val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder));
949 950 951
		val &= ~EXITLINE_MASK;
		val |= crtc_state->dc3co_exitline << EXITLINE_SHIFT;
		val |= EXITLINE_ENABLE;
952
		intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
953
	}
954

955
	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
956
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
957
			     intel_dp->psr.psr2_sel_fetch_enabled ?
958
			     IGNORE_PSR2_HW_TRACKING : 0);
959 960
}

961
static void intel_psr_enable_locked(struct intel_dp *intel_dp,
962 963
				    const struct intel_crtc_state *crtc_state,
				    const struct drm_connector_state *conn_state)
964
{
965
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
966
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
967
	struct intel_encoder *encoder = &dig_port->base;
968
	u32 val;
969

970
	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
971

972 973 974 975 976
	intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
	intel_dp->psr.busy_frontbuffer_bits = 0;
	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
	intel_dp->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
977 978
	/* DC5/DC6 requires at least 6 idle frames */
	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
979 980
	intel_dp->psr.dc3co_exit_delay = val;
	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;

	/*
	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
	 * will still keep the error set even after the reset done in the
	 * irq_preinstall and irq_uninstall hooks.
	 * And enabling in this situation causes the screen to freeze the first
	 * time that PSR HW tries to activate, so let's keep PSR disabled
	 * to avoid any rendering problems.
	 */
990
	if (INTEL_GEN(dev_priv) >= 12) {
991
		val = intel_de_read(dev_priv,
992
				    TRANS_PSR_IIR(intel_dp->psr.transcoder));
993 994
		val &= EDP_PSR_ERROR(0);
	} else {
995
		val = intel_de_read(dev_priv, EDP_PSR_IIR);
996
		val &= EDP_PSR_ERROR(intel_dp->psr.transcoder);
997
	}
998
	if (val) {
999
		intel_dp->psr.sink_not_reliable = true;
1000 1001
		drm_dbg_kms(&dev_priv->drm,
			    "PSR interruption error set, not enabling PSR\n");
1002 1003
		return;
	}
1004

1005
	drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
1006
		    intel_dp->psr.psr2_enabled ? "2" : "1");
1007
	intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
1008 1009
				     &intel_dp->psr.vsc);
	intel_write_dp_vsc_sdp(encoder, crtc_state, &intel_dp->psr.vsc);
1010 1011
	intel_psr_enable_sink(intel_dp);
	intel_psr_enable_source(intel_dp, crtc_state);
1012
	intel_dp->psr.enabled = true;
1013 1014 1015 1016

	intel_psr_activate(intel_dp);
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
1020
 * @crtc_state: new CRTC state
1021
 * @conn_state: new CONNECTOR state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
1025
void intel_psr_enable(struct intel_dp *intel_dp,
1026 1027
		      const struct intel_crtc_state *crtc_state,
		      const struct drm_connector_state *conn_state)
{
1029
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

1031
	if (!CAN_PSR(intel_dp))
		return;

1034
	if (!crtc_state->has_psr)
1035 1036
		return;

1037
	drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp);
1038

1039 1040 1041
	mutex_lock(&intel_dp->psr.lock);
	intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
	mutex_unlock(&intel_dp->psr.lock);
}

1044
static void intel_psr_exit(struct intel_dp *intel_dp)
1045
{
1046
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1047 1048
	u32 val;

1049 1050
	if (!intel_dp->psr.active) {
		if (transcoder_has_psr2(dev_priv, intel_dp->psr.transcoder)) {
1051
			val = intel_de_read(dev_priv,
1052
					    EDP_PSR2_CTL(intel_dp->psr.transcoder));
1053
			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1054 1055
		}

1056
		val = intel_de_read(dev_priv,
1057
				    EDP_PSR_CTL(intel_dp->psr.transcoder));
1058
		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1059

1060
		return;
1061
	}
1062

1063 1064
	if (intel_dp->psr.psr2_enabled) {
		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1065
		val = intel_de_read(dev_priv,
1066
				    EDP_PSR2_CTL(intel_dp->psr.transcoder));
1067
		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1068
		val &= ~EDP_PSR2_ENABLE;
1069
		intel_de_write(dev_priv,
1070
			       EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
1071
	} else {
1072
		val = intel_de_read(dev_priv,
1073
				    EDP_PSR_CTL(intel_dp->psr.transcoder));
1074
		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1075
		val &= ~EDP_PSR_ENABLE;
1076
		intel_de_write(dev_priv,
1077
			       EDP_PSR_CTL(intel_dp->psr.transcoder), val);
1078
	}
1079
	intel_dp->psr.active = false;
1080 1081
}

1082
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
1083
{
1084
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1085 1086
	i915_reg_t psr_status;
	u32 psr_status_mask;

1088
	lockdep_assert_held(&intel_dp->psr.lock);
1089

1090
	if (!intel_dp->psr.enabled)
1091 1092
		return;

1093
	drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
1094
		    intel_dp->psr.psr2_enabled ? "2" : "1");
1095

1096
	intel_psr_exit(intel_dp);
1097

1098 1099
	if (intel_dp->psr.psr2_enabled) {
		psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
1100
		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
1102
		psr_status = EDP_PSR_STATUS(intel_dp->psr.transcoder);
1103
		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
	}
1105 1106

	/* Wait till PSR is idle */
1107 1108
	if (intel_de_wait_for_clear(dev_priv, psr_status,
				    psr_status_mask, 2000))
1109
		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
1110

1111
	/* WA 1408330847 */
1112
	if (intel_dp->psr.psr2_sel_fetch_enabled &&
1113
	    (IS_TGL_DISP_STEPPING(dev_priv, STEP_A0, STEP_A0) ||
1114 1115 1116 1117
	     IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0)))
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
			     DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);

1118 1119 1120
	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

1121
	if (intel_dp->psr.psr2_enabled)
1122 1123
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);

1124
	intel_dp->psr.enabled = false;
1125 1126
}

1127 1128 1129
/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
1130
 * @old_crtc_state: old CRTC state
1131 1132 1133
 *
 * This function needs to be called before disabling pipe.
 */
1134 1135
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
1136
{
1137
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1138

1139
	if (!old_crtc_state->has_psr)
1140 1141
		return;

1142
	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1143 1144
		return;

1145
	mutex_lock(&intel_dp->psr.lock);
1146

1147
	intel_psr_disable_locked(intel_dp);
1148

1149 1150 1151
	mutex_unlock(&intel_dp->psr.lock);
	cancel_work_sync(&intel_dp->psr.work);
	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
}

1154
static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
1155
{
1156 1157
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (IS_TIGERLAKE(dev_priv))
		/*
		 * Writes to CURSURFLIVE in TGL are causing IOMMU errors and
		 * visual glitches that are often reproduced when executing
		 * CPU intensive workloads while an eDP 4K panel is attached.
		 *
		 * Manually exiting PSR causes the frontbuffer to be updated
		 * without glitches and the IOMMU errors are also gone but
		 * this comes at the cost of less time with PSR active.
		 *
		 * So using this workaround until this issue is root caused
		 * and a better fix is found.
		 */
		intel_psr_exit(intel_dp);
	else if (INTEL_GEN(dev_priv) >= 9)
		/*
		 * Display WA #0884: skl+
		 * This documented WA for bxt can be safely applied
		 * broadly so we can force HW tracking to exit PSR
		 * instead of disabling and re-enabling.
		 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
		 * but it makes more sense to write to the currently active
		 * pipe.
		 */
		intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
	else
		/*
		 * A write to CURSURFLIVE does not cause HW tracking to exit
		 * PSR on older gens so do the manual exit instead.
		 */
		intel_psr_exit(intel_dp);
}

1191 1192 1193 1194 1195 1196 1197
void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
					const struct intel_crtc_state *crtc_state,
					const struct intel_plane_state *plane_state,
					int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
1198
	const struct drm_rect *clip;
1199 1200
	u32 val, offset;
	int ret, x, y;
1201 1202 1203 1204 1205 1206 1207 1208 1209 1210

	if (!crtc_state->enable_psr2_sel_fetch)
		return;

	val = plane_state ? plane_state->ctl : 0;
	val &= plane->id == PLANE_CURSOR ? val : PLANE_SEL_FETCH_CTL_ENABLE;
	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), val);
	if (!val || plane->id == PLANE_CURSOR)
		return;

1211 1212 1213 1214
	clip = &plane_state->psr2_sel_fetch_area;

	val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
	val |= plane_state->uapi.dst.x1;
1215 1216
	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);

1217 1218 1219 1220 1221 1222 1223 1224
	/* TODO: consider auxiliary surfaces */
	x = plane_state->uapi.src.x1 >> 16;
	y = (plane_state->uapi.src.y1 >> 16) + clip->y1;
	ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
	if (ret)
		drm_warn_once(&dev_priv->drm, "skl_calc_main_surface_offset() returned %i\n",
			      ret);
	val = y << 16 | x;
1225 1226 1227 1228
	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
			  val);

	/* Sizes are 0 based */
1229
	val = (drm_rect_height(clip) - 1) << 16;
1230 1231 1232 1233
	val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
}

1234 1235
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
1236
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1237 1238 1239 1240 1241

	if (!HAS_PSR2_SEL_FETCH(dev_priv) ||
	    !crtc_state->enable_psr2_sel_fetch)
		return;

1242 1243
	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder),
		       crtc_state->psr2_man_track_ctl);
1244 1245
}

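/*
 * Translate the computed pipe damage clip into the PSR2 manual tracking /
 * selective update region register value (start/end addresses are in units
 * of 4-line blocks).
 */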
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
				  struct drm_rect *clip, bool full_update)
{
	u32 val = PSR2_MAN_TRK_CTL_ENABLE;

	if (full_update) {
		val |= PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
		goto exit;
	}

	if (clip->y1 == -1)
		goto exit;

1259 1260
	drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);

1261 1262
	val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
	val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
1263
	val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285
exit:
	crtc_state->psr2_man_track_ctl = val;
}

static void clip_area_update(struct drm_rect *overlap_damage_area,
			     struct drm_rect *damage_area)
{
	if (overlap_damage_area->y1 == -1) {
		overlap_damage_area->y1 = damage_area->y1;
		overlap_damage_area->y2 = damage_area->y2;
		return;
	}

	if (damage_area->y1 < overlap_damage_area->y1)
		overlap_damage_area->y1 = damage_area->y1;

	if (damage_area->y2 > overlap_damage_area->y2)
		overlap_damage_area->y2 = damage_area->y2;
}

int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
1286 1287
{
	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
1288
	struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
1289 1290 1291 1292
	struct intel_plane_state *new_plane_state, *old_plane_state;
	struct intel_plane *plane;
	bool full_update = false;
	int i, ret;
1293 1294

	if (!crtc_state->enable_psr2_sel_fetch)
1295 1296 1297 1298 1299 1300
		return 0;

	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
	if (ret)
		return ret;

	/*
	 * Calculate the minimal selective fetch area of each plane and
	 * calculate the pipe damaged area.
	 * In the next loop the plane selective fetch area will actually be set
	 * using the whole pipe damaged area.
	 */
1307 1308
	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
1309 1310 1311
		struct drm_rect src, damaged_area = { .y1 = -1 };
		struct drm_mode_rect *damaged_clips;
		u32 num_clips, j;
1312 1313 1314 1315

		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
			continue;

1316 1317 1318 1319
		if (!new_plane_state->uapi.visible &&
		    !old_plane_state->uapi.visible)
			continue;

		/*
		 * TODO: Not clear how to handle planes with negative position,
		 * also planes are not updated if they have a negative X
		 * position so for now do a full update in these cases.
		 */
		if (new_plane_state->uapi.dst.y1 < 0 ||
		    new_plane_state->uapi.dst.x1 < 0) {
			full_update = true;
			break;
		}

1331
		num_clips = drm_plane_get_damage_clips_count(&new_plane_state->uapi);

		/*
		 * If visibility or the plane position changed, mark the whole
		 * plane area as damaged as it needs a complete redraw in the
		 * new and old position.
		 */
1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390
		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
		    !drm_rect_equals(&new_plane_state->uapi.dst,
				     &old_plane_state->uapi.dst)) {
			if (old_plane_state->uapi.visible) {
				damaged_area.y1 = old_plane_state->uapi.dst.y1;
				damaged_area.y2 = old_plane_state->uapi.dst.y2;
				clip_area_update(&pipe_clip, &damaged_area);
			}

			if (new_plane_state->uapi.visible) {
				damaged_area.y1 = new_plane_state->uapi.dst.y1;
				damaged_area.y2 = new_plane_state->uapi.dst.y2;
				clip_area_update(&pipe_clip, &damaged_area);
			}
			continue;
		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha ||
			   (!num_clips &&
			    new_plane_state->uapi.fb != old_plane_state->uapi.fb)) {
			/*
			 * If the plane doesn't have damaged areas but the
			 * framebuffer changed or alpha changed, mark the whole
			 * plane area as damaged.
			 */
			damaged_area.y1 = new_plane_state->uapi.dst.y1;
			damaged_area.y2 = new_plane_state->uapi.dst.y2;
			clip_area_update(&pipe_clip, &damaged_area);
			continue;
		}

		drm_rect_fp_to_int(&src, &new_plane_state->uapi.src);
		damaged_clips = drm_plane_get_damage_clips(&new_plane_state->uapi);

		for (j = 0; j < num_clips; j++) {
			struct drm_rect clip;

			clip.x1 = damaged_clips[j].x1;
			clip.y1 = damaged_clips[j].y1;
			clip.x2 = damaged_clips[j].x2;
			clip.y2 = damaged_clips[j].y2;
			if (drm_rect_intersect(&clip, &src))
				clip_area_update(&damaged_area, &clip);
		}

		if (damaged_area.y1 == -1)
			continue;

		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
		clip_area_update(&pipe_clip, &damaged_area);
	}

	if (full_update)
		goto skip_sel_fetch_set_loop;
1391

1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415
	/* It must be aligned to 4 lines */
	pipe_clip.y1 -= pipe_clip.y1 % 4;
	if (pipe_clip.y2 % 4)
		pipe_clip.y2 = ((pipe_clip.y2 / 4) + 1) * 4;

	/*
	 * Now that we have the pipe damaged area check if it intersects with
	 * every plane, if it does set the plane selective fetch area.
	 */
	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		struct drm_rect *sel_fetch_area, inter;

		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
		    !new_plane_state->uapi.visible)
			continue;

		inter = pipe_clip;
		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
			continue;

		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
1416
	}
1417

1418
skip_sel_fetch_set_loop:
1419 1420
	psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
	return 0;
1421 1422
}

1423 1424 1425 1426
/**
 * intel_psr_update - Update PSR state
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
1427
 * @conn_state: new CONNECTOR state
1428 1429 1430 1431 1432 1433
 *
 * This functions will update PSR states, disabling, enabling or switching PSR
 * version when executing fastsets. For full modeset, intel_psr_disable() and
 * intel_psr_enable() should be called instead.
 */
void intel_psr_update(struct intel_dp *intel_dp,
1434 1435
		      const struct intel_crtc_state *crtc_state,
		      const struct drm_connector_state *conn_state)
1436 1437
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1438
	struct intel_psr *psr = &intel_dp->psr;
1439 1440
	bool enable, psr2_enable;

1441
	if (!CAN_PSR(intel_dp))
1442 1443
		return;

1444
	mutex_lock(&intel_dp->psr.lock);
1445

1446 1447
	enable = crtc_state->has_psr;
	psr2_enable = crtc_state->has_psr2;
1448

1449 1450
	if (enable == psr->enabled && psr2_enable == psr->psr2_enabled &&
	    crtc_state->enable_psr2_sel_fetch == psr->psr2_sel_fetch_enabled) {
1451 1452
		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
		if (crtc_state->crc_enabled && psr->enabled)
1453
			psr_force_hw_tracking_exit(intel_dp);
1454 1455 1456 1457 1458
		else if (INTEL_GEN(dev_priv) < 9 && psr->enabled) {
			/*
			 * Activate PSR again after a force exit when enabling
			 * CRC in older gens
			 */
1459 1460 1461
			if (!intel_dp->psr.active &&
			    !intel_dp->psr.busy_frontbuffer_bits)
				schedule_work(&intel_dp->psr.work);
1462
		}
1463

1464
		goto unlock;
1465
	}
1466

1467 1468
	if (psr->enabled)
		intel_psr_disable_locked(intel_dp);
1469

1470
	if (enable)
1471
		intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
1472 1473

unlock:
1474
	mutex_unlock(&intel_dp->psr.lock);
1475 1476
}

1477
/**
1478 1479
 * psr_wait_for_idle - wait for PSR1 to idle
 * @intel_dp: Intel DP
1480 1481 1482
 * @out_value: PSR status in case of failure
 *
 * Returns: 0 on success or -ETIMEOUT if PSR status does not idle.
1483
 *
1484
 */
1485
static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value)
1486
{
1487
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1488 1489

	/*
1490 1491 1492 1493
	 * From bspec: Panel Self Refresh (BDW+)
	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
	 * defensive enough to cover everything.
1494
	 */
1495
	return __intel_wait_for_register(&dev_priv->uncore,
1496
					 EDP_PSR_STATUS(intel_dp->psr.transcoder),
1497
					 EDP_PSR_STATUS_STATE_MASK,
1498 1499
					 EDP_PSR_STATUS_STATE_IDLE, 2, 50,
					 out_value);
1500 1501
}

1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516
/**
 * intel_psr_wait_for_idle - wait for PSR1 to idle
 * @new_crtc_state: new CRTC state
 *
 * This function is expected to be called from pipe_update_start() where it is
 * not expected to race with PSR enable or disable.
 */
void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
	struct intel_encoder *encoder;

	if (!new_crtc_state->has_psr)
		return;

1517 1518
	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
					     new_crtc_state->uapi.encoder_mask) {
1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		u32 psr_status;

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled ||
		    (intel_dp->psr.enabled && intel_dp->psr.psr2_enabled)) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		/* when the PSR1 is enabled */
		if (psr_wait_for_idle(intel_dp, &psr_status))
			drm_err(&dev_priv->drm,
				"PSR idle timed out 0x%x, atomic update may fail\n",
				psr_status);
		mutex_unlock(&intel_dp->psr.lock);
	}
}

static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
{
1540
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1541 1542 1543 1544
	i915_reg_t reg;
	u32 mask;
	int err;

1545
	if (!intel_dp->psr.enabled)
1546
		return false;

1548 1549
	if (intel_dp->psr.psr2_enabled) {
		reg = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
1550
		mask = EDP_PSR2_STATUS_STATE_MASK;
1551
	} else {
1552
		reg = EDP_PSR_STATUS(intel_dp->psr.transcoder);
1553
		mask = EDP_PSR_STATUS_STATE_MASK;
	}
1555

1556
	mutex_unlock(&intel_dp->psr.lock);
1557

1558
	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
1559
	if (err)
1560 1561
		drm_err(&dev_priv->drm,
			"Timed out waiting for PSR Idle for re-enable\n");
1562 1563

	/* After the unlocked wait, verify that PSR is still wanted! */
1564 1565
	mutex_lock(&intel_dp->psr.lock);
	return err == 0 && intel_dp->psr.enabled;
1566
}

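/*
 * Force mode_changed on every eDP connector's CRTC and commit, so that a new
 * PSR debug mode is re-evaluated through a modeset/fastset pass.
 */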
static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct drm_connector *conn;
	int err = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
	state->acquire_ctx = &ctx;

retry:

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct drm_crtc_state *crtc_state;

		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			err = PTR_ERR(conn_state);
			break;
		}

		if (!conn_state->crtc)
			continue;

		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			break;
		}

		/* Mark mode as changed to trigger a pipe->update() */
		crtc_state->mode_changed = true;
	}
	drm_connector_list_iter_end(&conn_iter);

	if (err == 0)
		err = drm_atomic_commit(state);

	if (err == -EDEADLK) {
		drm_atomic_state_clear(state);
		err = drm_modeset_backoff(&ctx);
		if (!err)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_atomic_state_put(state);

	return err;
}

int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
	u32 old_mode;
	int ret;

	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
	if (ret)
		return ret;

	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
	intel_dp->psr.debug = val;

	/*
	 * Do it right away if it's already enabled, otherwise it will be done
	 * when enabling the source.
	 */
	if (intel_dp->psr.enabled)
		psr_irq_control(intel_dp);

	mutex_unlock(&intel_dp->psr.lock);

	if (old_mode != mode)
		ret = intel_psr_fastset_force(dev_priv);

	return ret;
}

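/*
 * Called from the PSR work when an AUX error interrupt was flagged: disable
 * PSR, mark the sink as not reliable and make sure the sink is back in the
 * D0 power state.
 */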
static void intel_psr_handle_irq(struct intel_dp *intel_dp)
{
	struct intel_psr *psr = &intel_dp->psr;

	intel_psr_disable_locked(intel_dp);
	psr->sink_not_reliable = true;
	/* let's make sure that sink is awake */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

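/*
 * Deferred work used to re-activate PSR: handle a pending AUX error if one
 * was flagged, wait for the hardware to report idle, and re-activate PSR
 * unless new frontbuffer activity showed up in the meantime.
 */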
static void intel_psr_work(struct work_struct *work)
{
	struct intel_dp *intel_dp =
		container_of(work, typeof(*intel_dp), psr.work);

	mutex_lock(&intel_dp->psr.lock);

	if (!intel_dp->psr.enabled)
		goto unlock;

	if (READ_ONCE(intel_dp->psr.irq_aux_error))
		intel_psr_handle_irq(intel_dp);

	/*
	 * We have to make sure PSR is ready for re-enable, otherwise it stays
	 * disabled until the next full enable/disable cycle. PSR might take
	 * some time to get fully disabled and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(intel_dp))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
		goto unlock;

	intel_psr_activate(intel_dp);
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_encoder *encoder;

	if (origin == ORIGIN_FLIP)
		return;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		pipe_frontbuffer_bits &=
			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;

		if (pipe_frontbuffer_bits)
			intel_psr_exit(intel_dp);

		mutex_unlock(&intel_dp->psr.lock);
	}
}

/*
 * Once we completely rely on PSR2 S/W tracking, intel_psr_flush() will
 * invalidate and flush the PSR for ORIGIN_FLIP events as well, so
 * tgl_dc3co_flush() will need to be changed accordingly.
 */
static void
tgl_dc3co_flush(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
		enum fb_op_origin origin)
{
	mutex_lock(&intel_dp->psr.lock);

	if (!intel_dp->psr.dc3co_enabled)
		goto unlock;

	if (!intel_dp->psr.psr2_enabled || !intel_dp->psr.active)
		goto unlock;

	/*
	 * Every frontbuffer flush caused by a flip pushes out the delayed
	 * work; by the time the delayed work actually runs, the display has
	 * been idle.
	 */
	if (!(frontbuffer_bits &
	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
		goto unlock;

	tgl_psr2_enable_dc3co(intel_dp);
	mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
			 intel_dp->psr.dc3co_exit_delay);

unlock:
	mutex_unlock(&intel_dp->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (origin == ORIGIN_FLIP) {
			tgl_dc3co_flush(intel_dp, frontbuffer_bits, origin);
			continue;
		}

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		pipe_frontbuffer_bits &=
			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;

		/* By definition flush = invalidate + flush */
		if (pipe_frontbuffer_bits)
			psr_force_hw_tracking_exit(intel_dp);

		if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
			schedule_work(&intel_dp->psr.work);
		mutex_unlock(&intel_dp->psr.lock);
	}
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @intel_dp: Intel DP
 *
 * This function is called after the connector has been initialized
 * (connector initialization handles the connector capabilities) and it
 * initializes the basic PSR state for each DP encoder.
 */
void intel_psr_init(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!HAS_PSR(dev_priv))
		return;

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms have an instance of PSR registers per transcoder but
	 * BDW, GEN9 and GEN11 are not validated by HW team in other transcoder
	 * than eDP one.
	 * For now it only supports one instance of PSR for BDW, GEN9 and GEN11.
	 * So lets keep it hardcoded to PORT_A for BDW, GEN9 and GEN11.
	 * But GEN12 supports an instance of PSR registers per transcoder.
	 */
	if (INTEL_GEN(dev_priv) < 12 && dig_port->base.port != PORT_A) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Port not supported\n");
		return;
	}

	intel_dp->psr.source_support = true;

	if (IS_HASWELL(dev_priv))
		/*
		 * HSW doesn't have PSR registers in the same space as the
		 * transcoder, so set this to a value that, when subtracted
		 * from the register in transcoder space, results in the
		 * right offset for HSW.
		 */
		dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;

	if (dev_priv->params.enable_psr == -1)
		if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
			dev_priv->params.enable_psr = 0;

	/* Set link_standby vs. link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		intel_dp->psr.link_standby = false;
	else if (INTEL_GEN(dev_priv) < 12)
		/* For new platforms up to TGL, respect the VBT again */
		intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link;

	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
	mutex_init(&intel_dp->psr.lock);
}

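/*
 * Read the sink PSR status and PSR error status over DPCD. Returns 0 on
 * success with @status masked down to the sink state field, or the failing
 * AUX read result otherwise.
 */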
static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
					   u8 *status, u8 *error_status)
{
	struct drm_dp_aux *aux = &intel_dp->aux;
	int ret;

	ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
	if (ret != 1)
		return ret;

	ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
	if (ret != 1)
		return ret;

	*status = *status & DP_PSR_SINK_STATE_MASK;

	return 0;
}

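/*
 * ALPM is only used with PSR2: if the sink reports an ALPM lock timeout
 * error, disable PSR, flag the sink as not reliable and clear the error.
 */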
static void psr_alpm_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_dp_aux *aux = &intel_dp->aux;
	struct intel_psr *psr = &intel_dp->psr;
	u8 val;
	int r;

	if (!psr->psr2_enabled)
		return;

	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
	if (r != 1) {
		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
		return;
	}

	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "ALPM lock timeout error, disabling PSR\n");

		/* Clearing error */
		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
	}
}

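/*
 * If the sink signals a PSR capability change through the ESI field,
 * disable PSR, flag the sink as not reliable and clear the indication.
 */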
static void psr_capability_changed_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	u8 val;
	int r;

	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
	if (r != 1) {
		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
		return;
	}

	if (val & DP_PSR_CAPS_CHANGE) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "Sink PSR capability changed, disabling PSR\n");

		/* Clearing it */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
	}
}

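/*
 * Handle a short HPD pulse for a PSR capable sink: check the sink status and
 * error status registers, disable PSR on internal, RFB storage, VSC SDP or
 * link CRC errors, and run the ALPM and capability-changed checks.
 */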
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	u8 status, error_status;
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled)
		goto exit;

	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
		drm_err(&dev_priv->drm,
			"Error reading PSR status or error status\n");
		goto exit;
	}

	if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}

	if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR sink internal error, disabling PSR\n");
	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR RFB storage error, disabling PSR\n");
	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (error_status & DP_PSR_LINK_CRC_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR Link CRC error, disabling PSR\n");

	if (error_status & ~errors)
		drm_err(&dev_priv->drm,
			"PSR_ERROR_STATUS unhandled errors %x\n",
			error_status & ~errors);
	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);

	psr_alpm_check(intel_dp);
	psr_capability_changed_check(intel_dp);

exit:
	mutex_unlock(&psr->lock);
}
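/*
 * Report whether PSR is currently enabled on this encoder, taking the PSR
 * lock to get a consistent snapshot. Returns false if PSR can't be used on
 * this encoder at all.
 */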

bool intel_psr_enabled(struct intel_dp *intel_dp)
{
	bool ret;

	if (!CAN_PSR(intel_dp))
		return false;

	mutex_lock(&intel_dp->psr.lock);
	ret = intel_dp->psr.enabled;
	mutex_unlock(&intel_dp->psr.lock);

	return ret;
}