/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>

#include "display/intel_dp.h"

#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_hdmi.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "skl_universal_plane.h"

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on
 * display panels which have a remote frame buffer (RFB) implemented
 * according to the PSR spec in eDP 1.3. The PSR feature allows the display
 * to go to lower standby states when the system is idle but the display is
 * on, as it completely eliminates display refresh requests to DDR memory
 * as long as the frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 *
 * DC3CO (DC3 clock off)
 *
 * On top of PSR2, GEN12 adds an intermediate power savings state that turns
 * the clock off automatically during PSR2 idle state.
 * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
 * entry/exit allows the HW to enter a low-power state even when page flipping
 * periodically (for instance a 30fps video playback scenario).
 *
 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was
 * in it), so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run
 * after 6 frames. If no other flip occurs and that work is executed, DC3CO is
 * disabled and PSR2 is configured to enter deep sleep, resetting again in case
 * of another flip.
 * Front buffer modifications do not trigger DC3CO activation on purpose as it
 * would bring a lot of complexity and most modern systems will only use page
 * flips.
 */

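/*
 * Effective PSR enable knob: the debugfs I915_PSR_DEBUG mode overrides the
 * enable_psr module parameter unless it is left at the default.
 */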
static bool psr_global_enabled(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DEFAULT:
		return i915->params.enable_psr;
	case I915_PSR_DEBUG_DISABLE:
		return false;
	default:
		return true;
	}
}

static bool psr2_global_enabled(struct intel_dp *intel_dp)
{
	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DISABLE:
	case I915_PSR_DEBUG_FORCE_PSR1:
		return false;
	default:
		return true;
	}
}

static void psr_irq_control(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder trans_shift;
	i915_reg_t imr_reg;
	u32 mask, val;

	/*
	 * gen12+ has registers relative to transcoder and one per transcoder
	 * using the same bit definition: handle it as TRANSCODER_EDP to force
	 * 0 shift in bit definition
	 */
	if (INTEL_GEN(dev_priv) >= 12) {
		trans_shift = 0;
		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
	} else {
		trans_shift = intel_dp->psr.transcoder;
		imr_reg = EDP_PSR_IMR;
	}

	mask = EDP_PSR_ERROR(trans_shift);
	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
		mask |= EDP_PSR_POST_EXIT(trans_shift) |
			EDP_PSR_PRE_ENTRY(trans_shift);

	/* Warning: it is masking/setting reserved bits too */
	val = intel_de_read(dev_priv, imr_reg);
	val &= ~EDP_PSR_TRANS_MASK(trans_shift);
	val |= ~mask;
	intel_de_write(dev_priv, imr_reg, val);
}

static void psr_event_print(struct drm_i915_private *i915,
			    u32 val, bool psr2_enabled)
{
	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		drm_dbg_kms(&i915->drm, "\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
}

void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
{
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	ktime_t time_ns =  ktime_get();
	enum transcoder trans_shift;
	i915_reg_t imr_reg;

	if (INTEL_GEN(dev_priv) >= 12) {
		trans_shift = 0;
		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
	} else {
		trans_shift = intel_dp->psr.transcoder;
		imr_reg = EDP_PSR_IMR;
	}

	if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
		intel_dp->psr.last_entry_attempt = time_ns;
		drm_dbg_kms(&dev_priv->drm,
			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
			    transcoder_name(cpu_transcoder));
	}

	if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
		intel_dp->psr.last_exit = time_ns;
		drm_dbg_kms(&dev_priv->drm,
			    "[transcoder %s] PSR exit completed\n",
			    transcoder_name(cpu_transcoder));

		if (INTEL_GEN(dev_priv) >= 9) {
			u32 val = intel_de_read(dev_priv,
						PSR_EVENT(cpu_transcoder));
			bool psr2_enabled = intel_dp->psr.psr2_enabled;

			intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
				       val);
			psr_event_print(dev_priv, val, psr2_enabled);
		}
	}

	if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
		u32 val;

		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
			 transcoder_name(cpu_transcoder));

		intel_dp->psr.irq_aux_error = true;

		/*
		 * If this interrupt is not masked it will keep firing so fast
		 * that it prevents the scheduled work from running.
		 * Also, after a PSR error we don't want to arm PSR again, so
		 * we don't care about unmasking the interrupt or clearing
		 * irq_aux_error.
		 */
		val = intel_de_read(dev_priv, imr_reg);
		val |= EDP_PSR_ERROR(trans_shift);
		intel_de_write(dev_priv, imr_reg, val);

		schedule_work(&intel_dp->psr.work);
	}
}

static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	u8 alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;
	return alpm_caps & DP_ALPM_CAP;
}

static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val = 8; /* assume the worst if we can't read the value */

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
		drm_dbg_kms(&i915->drm,
			    "Unable to get sink synchronization latency, assuming 8 frames\n");
	return val;
}

static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u16 val;
	ssize_t r;

	/*
	 * Return the default X granularity if granularity is not required or
	 * if the DPCD read fails.
	 */
	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
		return 4;

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
	if (r != 2)
		drm_dbg_kms(&i915->drm,
			    "Unable to read DP_PSR2_SU_X_GRANULARITY\n");

	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 */
	if (r != 2 || val == 0)
		val = 4;

	return val;
}

void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (!intel_dp->psr_dpcd[0])
		return;
	drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
		    intel_dp->psr_dpcd[0]);

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR support not currently available for this panel\n");
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}

	intel_dp->psr.sink_support = true;
	intel_dp->psr.sink_sync_latency =
		intel_dp_get_sink_sync_latency(intel_dp);

	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in the VSC but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way the panel is capable of doing selective
		 * updates without an AUX frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h panels
		 * without the Y-coordinate requirement we would need to
		 * enable GTC first.
		 */
		intel_dp->psr.sink_psr2_support = y_req && alpm;
		drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
			    intel_dp->psr.sink_psr2_support ? "" : "not ");

		if (intel_dp->psr.sink_psr2_support) {
			intel_dp->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
			intel_dp->psr.su_x_granularity =
				intel_dp_get_su_x_granulartiy(intel_dp);
		}
	}
}

static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 aux_clock_divider, aux_ctl;
	int i;
	static const u8 aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
			   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
			   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
			   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
	for (i = 0; i < sizeof(aux_msg); i += 4)
		intel_de_write(dev_priv,
			       EDP_PSR_AUX_DATA(intel_dp->psr.transcoder, i >> 2),
			       intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= psr_aux_mask;
	intel_de_write(dev_priv, EDP_PSR_AUX_CTL(intel_dp->psr.transcoder),
		       aux_ctl);
}

static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 dpcd_val = DP_PSR_ENABLE;

	/* Enable ALPM at sink for psr2 */
	if (intel_dp->psr.psr2_enabled) {
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE |
				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);

		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
	} else {
		if (intel_dp->psr.link_standby)
			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;

		if (INTEL_GEN(dev_priv) >= 8)
			dpcd_val |= DP_PSR_CRC_VERIFICATION;
	}

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (INTEL_GEN(dev_priv) >= 11)
		val |= EDP_PSR_TP4_TIME_0US;

	if (dev_priv->params.psr_safest_params) {
		val |= EDP_PSR_TP1_TIME_2500us;
		val |= EDP_PSR_TP2_TP3_TIME_2500us;
		goto check_tp3_sel;
	}

	if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
		val |= EDP_PSR_TP1_TIME_0us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
		val |= EDP_PSR_TP1_TIME_100us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
	else
		val |= EDP_PSR_TP1_TIME_2500us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_0us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_2500us;

check_tp3_sel:
	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	return val;
}

static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int idle_frames;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);

	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
		idle_frames = 0xf;

	return idle_frames;
}

static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (intel_dp->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	val |= intel_psr1_get_tp_time(intel_dp);

	if (INTEL_GEN(dev_priv) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	val |= (intel_de_read(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder)) &
		EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
	intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), val);
}

static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (dev_priv->params.psr_safest_params)
		return EDP_PSR2_TP2_TIME_2500us;

	if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
	    dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
		val |= EDP_PSR2_TP2_TIME_50us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR2_TP2_TIME_100us;
	else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR2_TP2_TIME_500us;
	else
		val |= EDP_PSR2_TP2_TIME_2500us;

	return val;
}

static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val;

	val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;

	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(intel_dp->psr.sink_sync_latency + 1);
	val |= intel_psr2_get_tp_time(intel_dp);

	if (INTEL_GEN(dev_priv) >= 12) {
		/*
		 * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are the default
		 * values from BSpec. To set an optimal power consumption,
		 * modes below 4K resolution need to decrease IO_BUFFER_WAKE
		 * and FAST_WAKE, and modes above 4K resolution need to
		 * increase them.
		 */
		val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
		val |= TGL_EDP_PSR2_FAST_WAKE(7);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		val |= EDP_PSR2_IO_BUFFER_WAKE(7);
		val |= EDP_PSR2_FAST_WAKE(7);
	}

	if (intel_dp->psr.psr2_sel_fetch_enabled) {
		/* WA 1408330847 */
		if (IS_TGL_DISP_STEPPING(dev_priv, STEP_A0, STEP_A0) ||
		    IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0))
			intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
				     DIS_RAM_BYPASS_PSR2_MAN_TRACK,
				     DIS_RAM_BYPASS_PSR2_MAN_TRACK);

		intel_de_write(dev_priv,
			       PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
			       PSR2_MAN_TRK_CTL_ENABLE);
	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		intel_de_write(dev_priv,
			       PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0);
	}

	/*
	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
	 * recommends keeping this bit unset while PSR2 is enabled.
	 */
	intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), 0);

	intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
}

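/*
 * PSR2 HW support starts at gen9: gen9-11 expose it on the eDP transcoder
 * only, while gen12+ expose it on transcoder A.
 */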
static bool
transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
{
	if (INTEL_GEN(dev_priv) < 9)
		return false;
	else if (INTEL_GEN(dev_priv) >= 12)
		return trans == TRANSCODER_A;
	else
		return trans == TRANSCODER_EDP;
}

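/* Frame time in microseconds for the CRTC's adjusted mode, or 0 if inactive. */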
static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
{
	if (!cstate || !cstate->hw.active)
		return 0;

	return DIV_ROUND_UP(1000 * 1000,
			    drm_mode_vrefresh(&cstate->hw.adjusted_mode));
}

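/* Rewrite only the idle frame field of EDP_PSR2_CTL, leaving the rest intact. */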
static void psr2_program_idle_frames(struct intel_dp *intel_dp,
				     u32 idle_frames)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val;

	idle_frames <<=  EDP_PSR2_IDLE_FRAME_SHIFT;
	val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder));
	val &= ~EDP_PSR2_IDLE_FRAME_MASK;
	val |= idle_frames;
	intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
}

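/*
 * DC3CO helpers: while DC3CO is allowed the PSR2 idle frame count is set to 0,
 * and when DC3CO is disallowed the normal idle frame count is restored so
 * PSR2 can enter deep sleep again (see the DC3CO section in the DOC comment).
 */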
static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	psr2_program_idle_frames(intel_dp, 0);
	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
}

static void tgl_dc3co_disable_work(struct work_struct *work)
{
	struct intel_dp *intel_dp =
		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);

	mutex_lock(&intel_dp->psr.lock);
	/* If delayed work is pending, it is not idle */
	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
		goto unlock;

	tgl_psr2_disable_dc3co(intel_dp);
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}

static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
{
	if (!intel_dp->psr.dc3co_enabled)
		return;

	cancel_delayed_work(&intel_dp->psr.dc3co_work);
	/* Before PSR2 exit disallow dc3co */
	tgl_psr2_disable_dc3co(intel_dp);
}

static void
tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *crtc_state)
{
	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 exit_scanlines;

	if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO))
		return;

	/* B.Specs:49196 DC3CO only works with pipeA and DDIA.*/
	if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A ||
	    dig_port->base.port != PORT_A)
		return;

	/*
	 * DC3CO Exit time 200us B.Spec 49196
	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
	 */
	exit_scanlines =
		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;

	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
		return;

	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
}

static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
					      struct intel_crtc_state *crtc_state)
{
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;

	if (!dev_priv->params.enable_psr2_sel_fetch &&
	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 sel fetch not enabled, disabled by parameter\n");
		return false;
	}

	if (crtc_state->uapi.async_flip) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 sel fetch not enabled, async flip enabled\n");
		return false;
	}

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane_state->uapi.rotation != DRM_MODE_ROTATE_0) {
			drm_dbg_kms(&dev_priv->drm,
				    "PSR2 sel fetch not enabled, plane rotated\n");
			return false;
		}
	}

	return crtc_state->enable_psr2_sel_fetch = true;
}

static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;

	if (!intel_dp->psr.sink_psr2_support)
		return false;

	/* JSL and EHL only support eDP 1.3 */
	if (IS_JSL_EHL(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
		return false;
	}

	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not supported in transcoder %s\n",
			    transcoder_name(crtc_state->cpu_transcoder));
		return false;
	}

	if (!psr2_global_enabled(intel_dp)) {
		drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
		return false;
	}

	/*
	 * DSC and PSR2 cannot be enabled simultaneously. If a requested
	 * resolution requires DSC to be enabled, priority is given to DSC
	 * over PSR2.
	 */
	if (crtc_state->dsc.compression_enable) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 cannot be enabled since DSC is enabled\n");
		return false;
	}

	if (crtc_state->crc_enabled) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
		return false;
	}

	if (INTEL_GEN(dev_priv) >= 12) {
		psr_max_h = 5120;
		psr_max_v = 3200;
		max_bpp = 30;
	} else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		psr_max_h = 4096;
		psr_max_v = 2304;
		max_bpp = 24;
	} else if (IS_GEN(dev_priv, 9)) {
		psr_max_h = 3640;
		psr_max_v = 2304;
		max_bpp = 24;
	}

	if (crtc_state->pipe_bpp > max_bpp) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, pipe bpp %d > max supported %d\n",
			    crtc_state->pipe_bpp, max_bpp);
		return false;
	}

	/*
	 * HW sends SU blocks of size four scan lines, which means the starting
	 * X coordinate and Y granularity requirements will always be met. We
	 * only need to validate the SU block width is a multiple of
	 * x granularity.
	 */
	if (crtc_hdisplay % intel_dp->psr.su_x_granularity) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
			    crtc_hdisplay, intel_dp->psr.su_x_granularity);
		return false;
	}

	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
		    !HAS_PSR_HW_TRACKING(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
			return false;
		}
	}

	if (!crtc_state->enable_psr2_sel_fetch &&
	    (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			    crtc_hdisplay, crtc_vdisplay,
			    psr_max_h, psr_max_v);
		return false;
	}

	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
	return true;
}

void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	int psr_setup_time;

	/*
	 * Current PSR panels don't work reliably with VRR enabled, so if
	 * VRR is enabled do not enable PSR.
	 */
	if (crtc_state->vrr.enable)
		return;

	if (!CAN_PSR(intel_dp))
		return;

	if (!psr_global_enabled(intel_dp)) {
		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
		return;
	}

	if (intel_dp->psr.sink_not_reliable) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR sink implementation is not reliable\n");
		return;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Interlaced mode enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			    intel_dp->psr_dpcd[1]);
		return;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: PSR setup time (%d us) too long\n",
			    psr_setup_time);
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
	crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder transcoder = intel_dp->psr.transcoder;

	if (transcoder_has_psr2(dev_priv, transcoder))
		drm_WARN_ON(&dev_priv->drm,
			    intel_de_read(dev_priv, EDP_PSR2_CTL(transcoder)) & EDP_PSR2_ENABLE);

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, EDP_PSR_CTL(transcoder)) & EDP_PSR_ENABLE);
	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
	lockdep_assert_held(&intel_dp->psr.lock);

	/* psr1 and psr2 are mutually exclusive. */
	if (intel_dp->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);

	intel_dp->psr.active = true;
}

static void intel_psr_enable_source(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 mask;

	/* Only HSW and BDW have PSR AUX registers that need to be set up.
	 * SKL+ use hardcoded values for PSR AUX transactions.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_psr_setup_aux(intel_dp);

	if (intel_dp->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
					   !IS_GEMINILAKE(dev_priv))) {
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 chicken = intel_de_read(dev_priv, reg);

		chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
			   PSR2_ADD_VERTICAL_LINE_COUNT;
		intel_de_write(dev_priv, reg, chicken);
	}

	/*
	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
	 * mask LPSP to avoid a dependency on other drivers that might block
	 * runtime_pm, besides preventing other hw tracking issues now that we
	 * can rely on frontbuffer tracking.
	 */
	mask = EDP_PSR_DEBUG_MASK_MEMUP |
	       EDP_PSR_DEBUG_MASK_HPD |
	       EDP_PSR_DEBUG_MASK_LPSP |
	       EDP_PSR_DEBUG_MASK_MAX_SLEEP;

	if (INTEL_GEN(dev_priv) < 11)
		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;

	intel_de_write(dev_priv, EDP_PSR_DEBUG(intel_dp->psr.transcoder),
		       mask);

	psr_irq_control(intel_dp);

	if (crtc_state->dc3co_exitline) {
		u32 val;

		/*
		 * TODO: if future platforms supports DC3CO in more than one
		 * transcoder, EXITLINE will need to be unset when disabling PSR
		 */
		val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder));
		val &= ~EXITLINE_MASK;
		val |= crtc_state->dc3co_exitline << EXITLINE_SHIFT;
		val |= EXITLINE_ENABLE;
		intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
	}

	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
			     intel_dp->psr.psr2_sel_fetch_enabled ?
			     IGNORE_PSR2_HW_TRACKING : 0);
}

static void intel_psr_enable_locked(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *crtc_state,
				    const struct drm_connector_state *conn_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	u32 val;

	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);

	intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
	intel_dp->psr.busy_frontbuffer_bits = 0;
	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
	intel_dp->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
	/* DC5/DC6 requires at least 6 idle frames */
	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
	intel_dp->psr.dc3co_exit_delay = val;
	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;

	/*
	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
	 * will still keep the error set even after the reset done in the
	 * irq_preinstall and irq_uninstall hooks.
	 * And enabling PSR in this situation causes the screen to freeze the
	 * first time that PSR HW tries to activate, so let's keep PSR disabled
	 * to avoid any rendering problems.
	 */
	if (INTEL_GEN(dev_priv) >= 12) {
		val = intel_de_read(dev_priv,
				    TRANS_PSR_IIR(intel_dp->psr.transcoder));
		val &= EDP_PSR_ERROR(0);
	} else {
		val = intel_de_read(dev_priv, EDP_PSR_IIR);
		val &= EDP_PSR_ERROR(intel_dp->psr.transcoder);
	}
	if (val) {
		intel_dp->psr.sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "PSR interruption error set, not enabling PSR\n");
		return;
	}

	drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
		    intel_dp->psr.psr2_enabled ? "2" : "1");
	intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
				     &intel_dp->psr.vsc);
	intel_write_dp_vsc_sdp(encoder, crtc_state, &intel_dp->psr.vsc);
	intel_psr_enable_sink(intel_dp);
	intel_psr_enable_source(intel_dp, crtc_state);
	intel_dp->psr.enabled = true;

	intel_psr_activate(intel_dp);
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 * @conn_state: new CONNECTOR state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state,
		      const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!CAN_PSR(intel_dp))
		return;

	if (!crtc_state->has_psr)
		return;

	drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp);

	mutex_lock(&intel_dp->psr.lock);
	intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
	mutex_unlock(&intel_dp->psr.lock);
}

static void intel_psr_exit(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val;

	if (!intel_dp->psr.active) {
		if (transcoder_has_psr2(dev_priv, intel_dp->psr.transcoder)) {
			val = intel_de_read(dev_priv,
					    EDP_PSR2_CTL(intel_dp->psr.transcoder));
			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
		}

		val = intel_de_read(dev_priv,
				    EDP_PSR_CTL(intel_dp->psr.transcoder));
		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);

		return;
	}

	if (intel_dp->psr.psr2_enabled) {
		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
		val = intel_de_read(dev_priv,
				    EDP_PSR2_CTL(intel_dp->psr.transcoder));
		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
		val &= ~EDP_PSR2_ENABLE;
		intel_de_write(dev_priv,
			       EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
	} else {
		val = intel_de_read(dev_priv,
				    EDP_PSR_CTL(intel_dp->psr.transcoder));
		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
		val &= ~EDP_PSR_ENABLE;
		intel_de_write(dev_priv,
			       EDP_PSR_CTL(intel_dp->psr.transcoder), val);
	}
	intel_dp->psr.active = false;
}

static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t psr_status;
	u32 psr_status_mask;

	lockdep_assert_held(&intel_dp->psr.lock);

	if (!intel_dp->psr.enabled)
		return;

	drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
		    intel_dp->psr.psr2_enabled ? "2" : "1");

	intel_psr_exit(intel_dp);

	if (intel_dp->psr.psr2_enabled) {
		psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		psr_status = EDP_PSR_STATUS(intel_dp->psr.transcoder);
		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
	}

	/* Wait till PSR is idle */
	if (intel_de_wait_for_clear(dev_priv, psr_status,
				    psr_status_mask, 2000))
		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");

	/* WA 1408330847 */
	if (intel_dp->psr.psr2_sel_fetch_enabled &&
	    (IS_TGL_DISP_STEPPING(dev_priv, STEP_A0, STEP_A0) ||
	     IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0)))
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
			     DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	if (intel_dp->psr.psr2_enabled)
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);

	intel_dp->psr.enabled = false;
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_psr)
		return;

	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
		return;

	mutex_lock(&intel_dp->psr.lock);

	intel_psr_disable_locked(intel_dp);

	mutex_unlock(&intel_dp->psr.lock);
	cancel_work_sync(&intel_dp->psr.work);
	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
}

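/*
 * Force the HW frontbuffer tracking to refetch the frame: either via a
 * CURSURFLIVE write or, where that doesn't work, by manually exiting PSR.
 */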
static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (IS_TIGERLAKE(dev_priv))
		/*
		 * Writes to CURSURFLIVE in TGL are causing IOMMU errors and
		 * visual glitches that are often reproduced when executing
		 * CPU intensive workloads while a eDP 4K panel is attached.
		 *
		 * Manually exiting PSR causes the frontbuffer to be updated
		 * without glitches and the IOMMU errors are also gone but
		 * this comes at the cost of less time with PSR active.
		 *
		 * So using this workaround until this issue is root caused
		 * and a better fix is found.
		 */
		intel_psr_exit(intel_dp);
	else if (INTEL_GEN(dev_priv) >= 9)
		/*
		 * Display WA #0884: skl+
		 * This documented WA for bxt can be safely applied
		 * broadly so we can force HW tracking to exit PSR
		 * instead of disabling and re-enabling.
		 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
		 * but it makes more sense to write to the currently active
		 * pipe.
		 */
		intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
	else
		/*
		 * A write to CURSURFLIVE does not cause HW tracking to exit PSR
		 * on older gens, so do the manual exit instead.
		 */
		intel_psr_exit(intel_dp);
}

void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
					const struct intel_crtc_state *crtc_state,
					const struct intel_plane_state *plane_state,
					int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	const struct drm_rect *clip;
	u32 val, offset;
	int ret, x, y;

	if (!crtc_state->enable_psr2_sel_fetch)
		return;

	val = plane_state ? plane_state->ctl : 0;
	val &= plane->id == PLANE_CURSOR ? val : PLANE_SEL_FETCH_CTL_ENABLE;
	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), val);
	if (!val || plane->id == PLANE_CURSOR)
		return;

	clip = &plane_state->psr2_sel_fetch_area;

	val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
	val |= plane_state->uapi.dst.x1;
	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);

	/* TODO: consider auxiliary surfaces */
	x = plane_state->uapi.src.x1 >> 16;
	y = (plane_state->uapi.src.y1 >> 16) + clip->y1;
	ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
	if (ret)
		drm_warn_once(&dev_priv->drm, "skl_calc_main_surface_offset() returned %i\n",
			      ret);
	val = y << 16 | x;
	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
			  val);

	/* Sizes are 0 based */
	val = (drm_rect_height(clip) - 1) << 16;
	val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
	intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
}

void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (!HAS_PSR2_SEL_FETCH(dev_priv) ||
	    !crtc_state->enable_psr2_sel_fetch)
		return;

	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder),
		       crtc_state->psr2_man_track_ctl);
}

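/*
 * Build the PSR2 manual tracking register value from the computed selective
 * update region; the SU region start/end addresses are programmed in units
 * of 4 scan line blocks.
 */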
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
				  struct drm_rect *clip, bool full_update)
{
	u32 val = PSR2_MAN_TRK_CTL_ENABLE;

	if (full_update) {
		val |= PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
		goto exit;
	}

	if (clip->y1 == -1)
		goto exit;

	drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);

	val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
	val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
	val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
exit:
	crtc_state->psr2_man_track_ctl = val;
}

static void clip_area_update(struct drm_rect *overlap_damage_area,
			     struct drm_rect *damage_area)
{
	if (overlap_damage_area->y1 == -1) {
		overlap_damage_area->y1 = damage_area->y1;
		overlap_damage_area->y2 = damage_area->y2;
		return;
	}

	if (damage_area->y1 < overlap_damage_area->y1)
		overlap_damage_area->y1 = damage_area->y1;

	if (damage_area->y2 > overlap_damage_area->y2)
		overlap_damage_area->y2 = damage_area->y2;
}

int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
	struct intel_plane_state *new_plane_state, *old_plane_state;
	struct intel_plane *plane;
	bool full_update = false;
	int i, ret;

	if (!crtc_state->enable_psr2_sel_fetch)
		return 0;

	ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
	if (ret)
		return ret;

	/*
	 * Calculate minimal selective fetch area of each plane and calculate
	 * the pipe damaged area.
	 * In the next loop the plane selective fetch area will actually be set
	 * using whole pipe damaged area.
	 */
	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		struct drm_rect src, damaged_area = { .y1 = -1 };
		struct drm_mode_rect *damaged_clips;
		u32 num_clips, j;

		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
			continue;

		if (!new_plane_state->uapi.visible &&
		    !old_plane_state->uapi.visible)
			continue;

		/*
		 * TODO: Not clear how to handle planes with negative position,
		 * also planes are not updated if they have a negative X
		 * position, so for now do a full update in these cases.
		 */
		if (new_plane_state->uapi.dst.y1 < 0 ||
		    new_plane_state->uapi.dst.x1 < 0) {
			full_update = true;
			break;
		}

		num_clips = drm_plane_get_damage_clips_count(&new_plane_state->uapi);

		/*
		 * If visibility changed or the plane moved, mark the whole
		 * plane area as damaged as it needs to be completely redrawn
		 * in both the new and old positions.
		 */
		if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
		    !drm_rect_equals(&new_plane_state->uapi.dst,
				     &old_plane_state->uapi.dst)) {
			if (old_plane_state->uapi.visible) {
				damaged_area.y1 = old_plane_state->uapi.dst.y1;
				damaged_area.y2 = old_plane_state->uapi.dst.y2;
				clip_area_update(&pipe_clip, &damaged_area);
			}

			if (new_plane_state->uapi.visible) {
				damaged_area.y1 = new_plane_state->uapi.dst.y1;
				damaged_area.y2 = new_plane_state->uapi.dst.y2;
				clip_area_update(&pipe_clip, &damaged_area);
			}
			continue;
		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha ||
			   (!num_clips &&
			    new_plane_state->uapi.fb != old_plane_state->uapi.fb)) {
			/*
			 * If the plane doesn't have damaged areas but the
			 * framebuffer or alpha changed, mark the whole
			 * plane area as damaged.
			 */
			damaged_area.y1 = new_plane_state->uapi.dst.y1;
			damaged_area.y2 = new_plane_state->uapi.dst.y2;
			clip_area_update(&pipe_clip, &damaged_area);
			continue;
		}

		drm_rect_fp_to_int(&src, &new_plane_state->uapi.src);
		damaged_clips = drm_plane_get_damage_clips(&new_plane_state->uapi);

		for (j = 0; j < num_clips; j++) {
			struct drm_rect clip;

			clip.x1 = damaged_clips[j].x1;
			clip.y1 = damaged_clips[j].y1;
			clip.x2 = damaged_clips[j].x2;
			clip.y2 = damaged_clips[j].y2;
			if (drm_rect_intersect(&clip, &src))
				clip_area_update(&damaged_area, &clip);
		}

		if (damaged_area.y1 == -1)
			continue;

		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
		clip_area_update(&pipe_clip, &damaged_area);
	}

	if (full_update)
		goto skip_sel_fetch_set_loop;

	/* It must be aligned to 4 lines */
	pipe_clip.y1 -= pipe_clip.y1 % 4;
	if (pipe_clip.y2 % 4)
		pipe_clip.y2 = ((pipe_clip.y2 / 4) + 1) * 4;

	/*
	 * Now that we have the pipe damaged area, check if it intersects with
	 * every plane; if it does, set the plane selective fetch area.
	 */
	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		struct drm_rect *sel_fetch_area, inter;

		if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
		    !new_plane_state->uapi.visible)
			continue;

		inter = pipe_clip;
		if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
			continue;

		sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
		sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
		sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
	}

skip_sel_fetch_set_loop:
	psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
	return 0;
}

/**
 * intel_psr_update - Update PSR state
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 * @conn_state: new CONNECTOR state
 *
 * This function will update PSR states, disabling, enabling or switching PSR
 * version when executing fastsets. For full modeset, intel_psr_disable() and
 * intel_psr_enable() should be called instead.
 */
void intel_psr_update(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state,
		      const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	bool enable, psr2_enable;

	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&intel_dp->psr.lock);

	enable = crtc_state->has_psr;
	psr2_enable = crtc_state->has_psr2;

	if (enable == psr->enabled && psr2_enable == psr->psr2_enabled &&
	    crtc_state->enable_psr2_sel_fetch == psr->psr2_sel_fetch_enabled) {
		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
		if (crtc_state->crc_enabled && psr->enabled)
			psr_force_hw_tracking_exit(intel_dp);
		else if (INTEL_GEN(dev_priv) < 9 && psr->enabled) {
			/*
			 * Activate PSR again after a force exit when enabling
			 * CRC in older gens
			 */
			if (!intel_dp->psr.active &&
			    !intel_dp->psr.busy_frontbuffer_bits)
				schedule_work(&intel_dp->psr.work);
		}

		goto unlock;
	}

	if (psr->enabled)
		intel_psr_disable_locked(intel_dp);

	if (enable)
		intel_psr_enable_locked(intel_dp, crtc_state, conn_state);

unlock:
	mutex_unlock(&intel_dp->psr.lock);
}

/**
 * psr_wait_for_idle - wait for PSR1 to idle
 * @intel_dp: Intel DP
 * @out_value: PSR status in case of failure
 *
 * Returns: 0 on success or -ETIMEOUT if PSR status does not idle.
 *
 */
static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/*
	 * From bspec: Panel Self Refresh (BDW+)
	 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
	 * defensive enough to cover everything.
	 */
	return __intel_wait_for_register(&dev_priv->uncore,
					 EDP_PSR_STATUS(intel_dp->psr.transcoder),
					 EDP_PSR_STATUS_STATE_MASK,
					 EDP_PSR_STATUS_STATE_IDLE, 2, 50,
					 out_value);
}

/**
 * intel_psr_wait_for_idle - wait for PSR1 to idle
 * @new_crtc_state: new CRTC state
 *
 * This function is expected to be called from pipe_update_start() where it is
 * not expected to race with PSR enable or disable.
 */
void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
	struct intel_encoder *encoder;

	if (!new_crtc_state->has_psr)
		return;

	for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
					     new_crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		u32 psr_status;

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled ||
		    (intel_dp->psr.enabled && intel_dp->psr.psr2_enabled)) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		/* when the PSR1 is enabled */
		if (psr_wait_for_idle(intel_dp, &psr_status))
			drm_err(&dev_priv->drm,
				"PSR idle timed out 0x%x, atomic update may fail\n",
				psr_status);
		mutex_unlock(&intel_dp->psr.lock);
	}
}

static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t reg;
	u32 mask;
	int err;

	if (!intel_dp->psr.enabled)
		return false;

	if (intel_dp->psr.psr2_enabled) {
		reg = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
		mask = EDP_PSR2_STATUS_STATE_MASK;
	} else {
		reg = EDP_PSR_STATUS(intel_dp->psr.transcoder);
		mask = EDP_PSR_STATUS_STATE_MASK;
	}

	mutex_unlock(&intel_dp->psr.lock);

	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
	if (err)
		drm_err(&dev_priv->drm,
			"Timed out waiting for PSR Idle for re-enable\n");

	/* After the unlocked wait, verify that PSR is still wanted! */
	mutex_lock(&intel_dp->psr.lock);
	return err == 0 && intel_dp->psr.enabled;
}

static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct drm_connector *conn;
	int err = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
	state->acquire_ctx = &ctx;

retry:

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct drm_crtc_state *crtc_state;

		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			err = PTR_ERR(conn_state);
			break;
		}

		if (!conn_state->crtc)
			continue;

		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			break;
		}

		/* Mark mode as changed to trigger a pipe->update() */
		crtc_state->mode_changed = true;
1604
	}
1605
	drm_connector_list_iter_end(&conn_iter);
1606

1607 1608
	if (err == 0)
		err = drm_atomic_commit(state);
1609

1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621
	if (err == -EDEADLK) {
		drm_atomic_state_clear(state);
		err = drm_modeset_backoff(&ctx);
		if (!err)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_atomic_state_put(state);

	return err;
1622 1623
}

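/*
 * Entry point for the PSR debug control exposed through debugfs: @val packs
 * one of the I915_PSR_DEBUG_* modes in I915_PSR_DEBUG_MODE_MASK plus the
 * optional I915_PSR_DEBUG_IRQ flag. A mode change forces a fastset so the
 * new setting takes effect immediately.
 */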
int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
	u32 old_mode;
	int ret;

	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
	if (ret)
		return ret;

	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
	intel_dp->psr.debug = val;

	/*
	 * Do it right away if it's already enabled, otherwise it will be done
	 * when enabling the source.
	 */
	if (intel_dp->psr.enabled)
		psr_irq_control(intel_dp);

	mutex_unlock(&intel_dp->psr.lock);

	if (old_mode != mode)
		ret = intel_psr_fastset_force(dev_priv);

	return ret;
}

static void intel_psr_handle_irq(struct intel_dp *intel_dp)
{
	struct intel_psr *psr = &intel_dp->psr;

	intel_psr_disable_locked(intel_dp);
	psr->sink_not_reliable = true;
	/* make sure the sink is awake */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

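/*
 * Deferred PSR re-activation: scheduled from intel_psr_flush() once no busy
 * frontbuffer bits remain for the pipe, and also responsible for recovering
 * from AUX errors flagged via psr.irq_aux_error.
 */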
static void intel_psr_work(struct work_struct *work)
{
	struct intel_dp *intel_dp =
		container_of(work, typeof(*intel_dp), psr.work);

	mutex_lock(&intel_dp->psr.lock);

	if (!intel_dp->psr.enabled)
		goto unlock;

	if (READ_ONCE(intel_dp->psr.irq_aux_error))
		intel_psr_handle_irq(intel_dp);

	/*
	 * We have to make sure PSR is ready for re-enable, otherwise it stays
	 * disabled until the next full enable/disable cycle. PSR might take
	 * some time to get fully disabled and ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(intel_dp))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
		goto unlock;

	intel_psr_activate(intel_dp);
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_encoder *encoder;

	if (origin == ORIGIN_FLIP)
		return;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		pipe_frontbuffer_bits &=
			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;

		if (pipe_frontbuffer_bits)
			intel_psr_exit(intel_dp);

		mutex_unlock(&intel_dp->psr.lock);
	}
}
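
/*
 * Illustrative sketch (not code from this file): the frontbuffer tracking
 * code is expected to bracket CPU rendering with the invalidate/flush pair,
 * roughly like:
 *
 *	intel_psr_invalidate(i915, frontbuffer_bits, ORIGIN_CPU);
 *	... CPU writes to the frontbuffer ...
 *	intel_psr_flush(i915, frontbuffer_bits, ORIGIN_CPU);
 *
 * so PSR exits while the buffer is dirty and is re-armed once the writes
 * have been flushed out to memory.
 */
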
/*
 * Once we rely completely on PSR2 software tracking, intel_psr_flush() will
 * also invalidate and flush PSR for ORIGIN_FLIP events, so tgl_dc3co_flush()
 * will need to be changed accordingly.
 */
static void
tgl_dc3co_flush(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
		enum fb_op_origin origin)
{
	mutex_lock(&intel_dp->psr.lock);

	if (!intel_dp->psr.dc3co_enabled)
		goto unlock;

	if (!intel_dp->psr.psr2_enabled || !intel_dp->psr.active)
		goto unlock;

	/*
	 * Every flip-originated frontbuffer flush pushes back the delayed
	 * work; when the delayed work finally runs, the display has been
	 * idle for the whole DC3CO exit delay.
	 */
	if (!(frontbuffer_bits &
	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
		goto unlock;

	tgl_psr2_enable_dc3co(intel_dp);
	mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
			 intel_dp->psr.dc3co_exit_delay);

unlock:
	mutex_unlock(&intel_dp->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		if (origin == ORIGIN_FLIP) {
			tgl_dc3co_flush(intel_dp, frontbuffer_bits, origin);
			continue;
		}

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		pipe_frontbuffer_bits &=
			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;

		/* By definition flush = invalidate + flush */
		if (pipe_frontbuffer_bits)
			psr_force_hw_tracking_exit(intel_dp);

		if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
			schedule_work(&intel_dp->psr.work);
		mutex_unlock(&intel_dp->psr.lock);
	}
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @intel_dp: Intel DP
 *
 * This function is called after connector initialization (which handles the
 * connector capabilities) and sets up the basic PSR state for each DP encoder.
 */
void intel_psr_init(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!HAS_PSR(dev_priv))
		return;

	if (!intel_dp->psr.sink_support)
		return;

	/*
	 * The HSW spec explicitly ties PSR to port A.
	 * BDW+ platforms have an instance of the PSR registers per transcoder,
	 * but on BDW, GEN9 and GEN11 only the eDP transcoder has been
	 * validated by the HW team, so keep PSR hardcoded to PORT_A there.
	 * GEN12 supports a PSR instance per transcoder.
	 */
	if (INTEL_GEN(dev_priv) < 12 && dig_port->base.port != PORT_A) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Port not supported\n");
		return;
	}

	intel_dp->psr.source_support = true;

	if (IS_HASWELL(dev_priv))
		/*
		 * HSW doesn't have its PSR registers in the transcoder MMIO
		 * space, so set an adjustment that, when subtracted from the
		 * transcoder-space offset, yields the correct HSW offset.
		 */
		dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;

	if (dev_priv->params.enable_psr == -1)
		if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
			dev_priv->params.enable_psr = 0;

	/* Set link_standby vs. link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		intel_dp->psr.link_standby = false;
	else if (INTEL_GEN(dev_priv) < 12)
		/* For newer platforms, up to but not including TGL, respect the VBT again */
		intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link;

	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
	mutex_init(&intel_dp->psr.lock);
}

static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
					   u8 *status, u8 *error_status)
{
	struct drm_dp_aux *aux = &intel_dp->aux;
	int ret;

	ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
	if (ret != 1)
		return ret;

	ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
	if (ret != 1)
		return ret;

	*status = *status & DP_PSR_SINK_STATE_MASK;

	return 0;
}

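/*
 * PSR2 relies on ALPM for link power management; if the sink reports an
 * ALPM lock timeout error, disable PSR and flag the sink as not reliable.
 */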
static void psr_alpm_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_dp_aux *aux = &intel_dp->aux;
	struct intel_psr *psr = &intel_dp->psr;
	u8 val;
	int r;

	if (!psr->psr2_enabled)
		return;

	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
	if (r != 1) {
		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
		return;
	}

	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "ALPM lock timeout error, disabling PSR\n");

		/* Clearing error */
		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
	}
}

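/*
 * The sink can signal a PSR capability change through DP_PSR_ESI; the cached
 * capabilities are then stale, so disable PSR and flag the sink as not
 * reliable.
 */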
static void psr_capability_changed_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	u8 val;
	int r;

	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
	if (r != 1) {
		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
		return;
	}

	if (val & DP_PSR_CAPS_CHANGE) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "Sink PSR capability changed, disabling PSR\n");

		/* Clearing it */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
	}
}

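/*
 * Short HPD pulse handler for PSR: read the sink's PSR status and error
 * registers over DPCD, disable PSR on any sink-reported error, clear the
 * error bits and run the ALPM and capability-change checks.
 */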
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	u8 status, error_status;
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(intel_dp) || !intel_dp_is_edp(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled)
		goto exit;

	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
		drm_err(&dev_priv->drm,
			"Error reading PSR status or error status\n");
		goto exit;
	}

	if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}

	if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR sink internal error, disabling PSR\n");
	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR RFB storage error, disabling PSR\n");
	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (error_status & DP_PSR_LINK_CRC_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR Link CRC error, disabling PSR\n");

	if (error_status & ~errors)
		drm_err(&dev_priv->drm,
			"PSR_ERROR_STATUS unhandled errors %x\n",
			error_status & ~errors);
	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);

	psr_alpm_check(intel_dp);
	psr_capability_changed_check(intel_dp);

exit:
	mutex_unlock(&psr->lock);
}

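/*
 * Report the current software PSR enable state, under psr.lock so the answer
 * is consistent with a concurrent enable/disable.
 */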
bool intel_psr_enabled(struct intel_dp *intel_dp)
{
	bool ret;

	if (!CAN_PSR(intel_dp) || !intel_dp_is_edp(intel_dp))
		return false;

	mutex_lock(&intel_dp->psr.lock);
	ret = intel_dp->psr.enabled;
	mutex_unlock(&intel_dp->psr.lock);

	return ret;
}