/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the Display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. PSR allows the display to go to lower standby states
 * when the system is idle but the display is on, as it completely eliminates
 * display refresh requests to DDR memory as long as the frame buffer for that
 * display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */

#include <drm/drmP.h>

#include "intel_drv.h"
#include "i915_drv.h"

static inline enum intel_display_power_domain
psr_aux_domain(struct intel_dp *intel_dp)
{
	/* CNL HW requires corresponding AUX IOs to be powered up for PSR.
	 * However, for non-A AUX ports the corresponding non-EDP transcoders
	 * would have already enabled power well 2 and DC_OFF. This means we can
	 * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a
	 * specific AUX_IO reference without powering up any extra wells.
	 * Note that PSR is enabled only on Port A even though this function
	 * returns the correct domain for other ports too.
	 */
	return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
					      intel_dp->aux_power_domain;
}

static void psr_aux_io_power_get(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

	if (INTEL_GEN(dev_priv) < 10)
		return;

	intel_display_power_get(dev_priv, psr_aux_domain(intel_dp));
}

static void psr_aux_io_power_put(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

	if (INTEL_GEN(dev_priv) < 10)
		return;

	intel_display_power_put(dev_priv, psr_aux_domain(intel_dp));
}

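/*
 * Configure the PSR interrupt mask: AUX errors are always left unmasked,
 * while the pre-entry/post-exit events are only unmasked when debug is
 * requested, so that they show up in intel_psr_irq_handler() below.
 */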
void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug)
{
	u32 debug_mask, mask;

	/* No PSR interrupts on VLV/CHV */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return;

	mask = EDP_PSR_ERROR(TRANSCODER_EDP);
	debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) |
		     EDP_PSR_PRE_ENTRY(TRANSCODER_EDP);

	if (INTEL_GEN(dev_priv) >= 8) {
		mask |= EDP_PSR_ERROR(TRANSCODER_A) |
			EDP_PSR_ERROR(TRANSCODER_B) |
			EDP_PSR_ERROR(TRANSCODER_C);

		debug_mask |= EDP_PSR_POST_EXIT(TRANSCODER_A) |
			      EDP_PSR_PRE_ENTRY(TRANSCODER_A) |
			      EDP_PSR_POST_EXIT(TRANSCODER_B) |
			      EDP_PSR_PRE_ENTRY(TRANSCODER_B) |
			      EDP_PSR_POST_EXIT(TRANSCODER_C) |
			      EDP_PSR_PRE_ENTRY(TRANSCODER_C);
	}

	if (debug)
		mask |= debug_mask;

	WRITE_ONCE(dev_priv->psr.debug, debug);
	I915_WRITE(EDP_PSR_IMR, ~mask);
}

void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
{
	u32 transcoders = BIT(TRANSCODER_EDP);
	enum transcoder cpu_transcoder;
	ktime_t time_ns = ktime_get();

	if (INTEL_GEN(dev_priv) >= 8)
		transcoders |= BIT(TRANSCODER_A) |
			       BIT(TRANSCODER_B) |
			       BIT(TRANSCODER_C);

	for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
		/* FIXME: Exit PSR and link train manually when this happens. */
		if (psr_iir & EDP_PSR_ERROR(cpu_transcoder))
			DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n",
				      transcoder_name(cpu_transcoder));

		if (psr_iir & EDP_PSR_PRE_ENTRY(cpu_transcoder)) {
			dev_priv->psr.last_entry_attempt = time_ns;
			DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
				      transcoder_name(cpu_transcoder));
		}

		if (psr_iir & EDP_PSR_POST_EXIT(cpu_transcoder)) {
			dev_priv->psr.last_exit = time_ns;
			DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
				      transcoder_name(cpu_transcoder));
		}
	}
}

static bool intel_dp_get_y_coord_required(struct intel_dp *intel_dp)
{
	uint8_t psr_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1)
		return false;
	return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
}

static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
	uint8_t dprx = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
			      &dprx) != 1)
		return false;
	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}

static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	uint8_t alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;
	return alpm_caps & DP_ALPM_CAP;
}

static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	u8 val = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
		DRM_ERROR("Unable to get sink synchronization latency\n");
	return val;
}

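/*
 * Read the sink's PSR DPCD capabilities and cache what the driver needs:
 * basic PSR support, PSR2 + Y-coordinate support and, when PSR2 is usable,
 * the colorimetry, ALPM and sink synchronization latency information.
 */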
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (intel_dp->psr_dpcd[0]) {
		dev_priv->psr.sink_support = true;
		DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
	}

	if (INTEL_GEN(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in the VSC, but we
		 * are only sure that it is going to be used when required by
		 * the panel. This way the panel is capable of doing a
		 * selective update without an AUX frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h panels
		 * without the Y-coordinate requirement we would need to
		 * enable GTC first.
		 */
		dev_priv->psr.sink_psr2_support =
				intel_dp_get_y_coord_required(intel_dp);
		DRM_DEBUG_KMS("PSR2 %s on sink", dev_priv->psr.sink_psr2_support
			      ? "supported" : "not supported");

		if (dev_priv->psr.sink_psr2_support) {
			dev_priv->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
			dev_priv->psr.alpm =
				intel_dp_get_alpm_status(intel_dp);
			dev_priv->psr.sink_sync_latency =
				intel_dp_get_sink_sync_latency(intel_dp);
		}
	}
}

static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t val;

	val = I915_READ(VLV_PSRSTAT(pipe)) &
	      VLV_EDP_PSR_CURR_STATE_MASK;
	return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
	       (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
}

static void vlv_psr_setup_vsc(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	uint32_t val;

	/* VLV auto-generates the VSC packet as per eDP 1.3 spec, Table 3.10 */
	val  = I915_READ(VLV_VSCSDP(crtc->pipe));
	val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
	val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
	I915_WRITE(VLV_VSCSDP(crtc->pipe), val);
}

static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
	struct edp_vsc_psr psr_vsc;

	if (dev_priv->psr.psr2_enabled) {
		/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		if (dev_priv->psr.colorimetry_support) {
			psr_vsc.sdp_header.HB2 = 0x5;
			psr_vsc.sdp_header.HB3 = 0x13;
		} else {
			psr_vsc.sdp_header.HB2 = 0x4;
			psr_vsc.sdp_header.HB3 = 0xe;
		}
	} else {
		/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		psr_vsc.sdp_header.HB2 = 0x2;
		psr_vsc.sdp_header.HB3 = 0x8;
	}

	intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state,
					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}

static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
{
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
			   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
}

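/*
 * Pre-program the AUX registers used by the PSR hardware itself: the canned
 * message below is a native AUX write of DP_SET_POWER_D0 to the sink's
 * DP_SET_POWER register, which the hardware can send on its own when it
 * needs to wake the sink (see the DOC comment at the top of this file).
 */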
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	u32 aux_clock_divider, aux_ctl;
	int i;
	static const uint8_t aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
			   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
			   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
			   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(EDP_PSR_AUX_DATA(i >> 2),
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= psr_aux_mask;
	I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
}

static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u8 dpcd_val = DP_PSR_ENABLE;

	/* Enable ALPM at sink for psr2 */
	if (dev_priv->psr.psr2_enabled && dev_priv->psr.alpm)
		drm_dp_dpcd_writeb(&intel_dp->aux,
				DP_RECEIVER_ALPM_CONFIG,
				DP_ALPM_ENABLE);

	if (dev_priv->psr.psr2_enabled)
		dpcd_val |= DP_PSR_ENABLE_PSR2;
	if (dev_priv->psr.link_standby)
		dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static void vlv_psr_enable_source(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	/* Transition from PSR_state 0 (disabled) to PSR_state 1 (inactive) */
	I915_WRITE(VLV_PSRCTL(crtc->pipe),
		   VLV_EDP_PSR_MODE_SW_TIMER |
		   VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
		   VLV_EDP_PSR_ENABLE);
}

static void vlv_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/*
	 * Let's do the transition from PSR_state 1 (inactive) to
	 * PSR_state 2 (transition to active - static frame transmission).
	 * Then Hardware is responsible for the transition to
	 * PSR_state 3 (active - no Remote Frame Buffer (RFB) update).
	 */
	I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
		   VLV_EDP_PSR_ACTIVE_ENTRY);
}

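/*
 * Build and write EDP_PSR_CTL: idle frame count (respecting VBT), max sleep
 * time, link standby, TP1/TP2/TP3 wakeup times and training pattern
 * selection, then arm PSR1.
 */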
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t max_sleep_time = 0x1f;
	/*
	 * Let's respect VBT in case VBT asks for a higher idle_frame value.
	 * Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases. Also there are cases
	 * where the sink should be able to train with 5 or 6 idle patterns.
	 */
	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	uint32_t val = EDP_PSR_ENABLE;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (dev_priv->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
		val |= EDP_PSR_TP1_TIME_2500us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
		val |= EDP_PSR_TP1_TIME_500us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
		val |= EDP_PSR_TP1_TIME_100us;
	else
		val |= EDP_PSR_TP1_TIME_0us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
		val |= EDP_PSR_TP2_TP3_TIME_2500us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_0us;

	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
	I915_WRITE(EDP_PSR_CTL, val);
}

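/*
 * Build and write EDP_PSR2_CTL: idle frame count, selective-update tracking,
 * Y-coordinate handling on GLK and GEN10+, the frames-before-SU value derived
 * from the sink sync latency, and the TP2 wakeup time, then arm PSR2.
 */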
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	/*
	 * Let's respect VBT in case VBT asks for a higher idle_frame value.
	 * Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases. Also there are cases
	 * where the sink should be able to train with 5 or 6 idle patterns.
	 */
	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	u32 val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;

	/* FIXME: selective update is probably totally broken because it doesn't
	 * mesh at all with our frontbuffer tracking. And the hw alone isn't
	 * good enough. */
	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		val |= EDP_Y_COORDINATE_VALID | EDP_Y_COORDINATE_ENABLE;
	}

	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
		val |= EDP_PSR2_TP2_TIME_2500;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
		val |= EDP_PSR2_TP2_TIME_500;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
		val |= EDP_PSR2_TP2_TIME_100;
	else
		val |= EDP_PSR2_TP2_TIME_50;

	I915_WRITE(EDP_PSR2_CTL, val);
}

static void hsw_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* On HSW+, after we enable PSR on the source it will activate it
	 * as soon as it matches the configured idle_frame count. So
	 * we just actually enable it here at activation time.
	 */

	/* psr1 and psr2 are mutually exclusive. */
	if (dev_priv->psr.psr2_enabled)
		hsw_activate_psr2(intel_dp);
	else
		hsw_activate_psr1(intel_dp);
}

static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0;

	/*
	 * FIXME psr2_support is messed up. It's both computed
	 * dynamically during PSR enable, and extracted from sink
	 * caps during eDP detection.
	 */
	if (!dev_priv->psr.sink_psr2_support)
		return false;

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		psr_max_h = 4096;
		psr_max_v = 2304;
	} else if (IS_GEN9(dev_priv)) {
		psr_max_h = 3640;
		psr_max_v = 2304;
	}

	if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
		DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			      crtc_hdisplay, crtc_vdisplay,
			      psr_max_h, psr_max_v);
		return false;
	}

	return true;
}

void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int psr_setup_time;

	if (!CAN_PSR(dev_priv))
		return;

	if (!i915_modparams.enable_psr) {
		DRM_DEBUG_KMS("PSR disable by flag\n");
		return;
	}

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms with DDI implementation of PSR have different
	 * PSR registers per transcoder and we only implement the transcoder
	 * EDP ones. Since by Display design transcoder EDP is tied to port A
	 * we can safely bail out based on port A.
	 */
	if (HAS_DDI(dev_priv) && dig_port->base.port != PORT_A) {
		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
		return;
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    !dev_priv->psr.link_standby) {
		DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
		return;
	}

	if (IS_HASWELL(dev_priv) &&
	    I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
		      S3D_ENABLE) {
		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
		return;
	}

	if (IS_HASWELL(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
		return;
	}

	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			      intel_dp->psr_dpcd[1]);
		return;
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
			      psr_setup_time);
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		DRM_DEBUG_KMS("PSR condition failed: panel lacks power state control\n");
		return;
	}

	crtc_state->has_psr = true;
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
	DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (dev_priv->psr.psr2_enabled)
		WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
	else
		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	dev_priv->psr.activate(intel_dp);
	dev_priv->psr.active = true;
}

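/*
 * Program the source-side PSR plumbing: take the AUX IO power reference,
 * set up the PSR AUX registers on HSW/BDW, apply the PSR2 CHICKEN_TRANS
 * bits where needed, and configure EDP_PSR_DEBUG so that hardware
 * frontbuffer tracking only exits PSR for the events we care about.
 */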
static void hsw_psr_enable_source(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	psr_aux_io_power_get(intel_dp);

	/* Only HSW and BDW have PSR AUX registers that need to be setup.
	 * SKL+ use hardcoded values for the PSR AUX transactions.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_psr_setup_aux(intel_dp);

	if (dev_priv->psr.psr2_enabled) {
		u32 chicken = I915_READ(CHICKEN_TRANS(cpu_transcoder));

		if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv))
			chicken |= (PSR2_VSC_ENABLE_PROG_HEADER
				   | PSR2_ADD_VERTICAL_LINE_COUNT);

		else
			chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL;
		I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);

		I915_WRITE(EDP_PSR_DEBUG,
			   EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD |
			   EDP_PSR_DEBUG_MASK_LPSP |
			   EDP_PSR_DEBUG_MASK_MAX_SLEEP |
			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
	} else {
		/*
		 * Per Spec: Avoid continuous PSR exit by masking MEMUP
		 * and HPD. Also mask LPSP to avoid dependency on other
		 * drivers that might block runtime_pm besides
		 * preventing other hw tracking issues now that we can
		 * rely on frontbuffer tracking.
		 */
		I915_WRITE(EDP_PSR_DEBUG,
			   EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD |
			   EDP_PSR_DEBUG_MASK_LPSP |
			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
	}
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 * @crtc_state: new CRTC state
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	WARN_ON(dev_priv->drrs.dp);
	mutex_lock(&dev_priv->psr.lock);
	if (dev_priv->psr.enabled) {
		DRM_DEBUG_KMS("PSR already in use\n");
		goto unlock;
	}

	dev_priv->psr.psr2_enabled = crtc_state->has_psr2;
	dev_priv->psr.busy_frontbuffer_bits = 0;

	dev_priv->psr.setup_vsc(intel_dp, crtc_state);
	dev_priv->psr.enable_sink(intel_dp);
	dev_priv->psr.enable_source(intel_dp, crtc_state);
	dev_priv->psr.enabled = intel_dp;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_psr_activate(intel_dp);
	} else {
		/*
		 * FIXME: Activation should happen immediately since this
		 * function is just called after pipe is fully trained and
		 * enabled.
		 * However on some platforms we face issues when first
		 * activation follows a modeset so quickly.
		 *     - On VLV/CHV we get bank screen on first activation
		 *     - On HSW/BDW we get a recoverable frozen screen until
		 *       next exit-activate sequence.
		 */
		schedule_delayed_work(&dev_priv->psr.work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}

unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void vlv_psr_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	uint32_t val;

	if (dev_priv->psr.active) {
		/* Put VLV PSR back to PSR_state 0 (disabled). */
		if (intel_wait_for_register(dev_priv,
					    VLV_PSRSTAT(crtc->pipe),
					    VLV_EDP_PSR_IN_TRANS,
					    0,
					    1))
			WARN(1, "PSR transition took longer than expected\n");

		val = I915_READ(VLV_PSRCTL(crtc->pipe));
		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
		val &= ~VLV_EDP_PSR_ENABLE;
		val &= ~VLV_EDP_PSR_MODE_MASK;
		I915_WRITE(VLV_PSRCTL(crtc->pipe), val);

		dev_priv->psr.active = false;
	} else {
		WARN_ON(vlv_is_psr_active_on_pipe(dev, crtc->pipe));
	}
}

static void hsw_psr_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (dev_priv->psr.active) {
		i915_reg_t psr_status;
		u32 psr_status_mask;

		if (dev_priv->psr.psr2_enabled) {
			psr_status = EDP_PSR2_STATUS;
			psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;

			I915_WRITE(EDP_PSR2_CTL,
				   I915_READ(EDP_PSR2_CTL) &
				   ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));

		} else {
			psr_status = EDP_PSR_STATUS;
			psr_status_mask = EDP_PSR_STATUS_STATE_MASK;

			I915_WRITE(EDP_PSR_CTL,
				   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
		}

		/* Wait till PSR is idle */
		if (intel_wait_for_register(dev_priv,
					    psr_status, psr_status_mask, 0,
					    2000))
			DRM_ERROR("Timed out waiting for PSR Idle State\n");

		dev_priv->psr.active = false;
	} else {
		if (dev_priv->psr.psr2_enabled)
			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
		else
			WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	}

	psr_aux_io_power_put(intel_dp);
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!old_crtc_state->has_psr)
		return;

	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	dev_priv->psr.disable_source(intel_dp, old_crtc_state);

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	dev_priv->psr.enabled = NULL;
	mutex_unlock(&dev_priv->psr.lock);

	cancel_delayed_work_sync(&dev_priv->psr.work);
}

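/*
 * Wait, with psr.lock temporarily dropped, for the hardware to report an
 * idle PSR state. Returns true only if the wait succeeded and PSR is still
 * enabled once the lock has been re-acquired.
 */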
static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
{
	struct intel_dp *intel_dp;
	i915_reg_t reg;
	u32 mask;
	int err;

	intel_dp = dev_priv->psr.enabled;
	if (!intel_dp)
		return false;

	if (HAS_DDI(dev_priv)) {
		if (dev_priv->psr.psr2_enabled) {
			reg = EDP_PSR2_STATUS;
			mask = EDP_PSR2_STATUS_STATE_MASK;
		} else {
			reg = EDP_PSR_STATUS;
			mask = EDP_PSR_STATUS_STATE_MASK;
		}
	} else {
		struct drm_crtc *crtc =
			dp_to_dig_port(intel_dp)->base.base.crtc;
		enum pipe pipe = to_intel_crtc(crtc)->pipe;

		reg = VLV_PSRSTAT(pipe);
		mask = VLV_EDP_PSR_IN_TRANS;
	}

	mutex_unlock(&dev_priv->psr.lock);

	err = intel_wait_for_register(dev_priv, reg, mask, 0, 50);
	if (err)
		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");

	/* After the unlocked wait, verify that PSR is still wanted! */
	mutex_lock(&dev_priv->psr.lock);
	return err == 0 && dev_priv->psr.enabled;
}

static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work.work);

	mutex_lock(&dev_priv->psr.lock);

	/*
	 * We have to make sure PSR is ready for re-enable
	 * otherwise it keeps disabled until next full enable/disable cycle.
	 * PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
	if (!psr_wait_for_idle(dev_priv))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits)
		goto unlock;

	intel_psr_activate(dev_priv->psr.enabled);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

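/*
 * Kick the hardware out of the active PSR state: on DDI platforms clear the
 * PSR1/PSR2 enable bit; on VLV/CHV request the active-to-exit transition and
 * wake the sink by writing DP_SET_POWER_D0, since without it the hardware
 * does not complete the transition back to inactive.
 */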
static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	u32 val;

	if (!dev_priv->psr.active)
		return;

	if (HAS_DDI(dev_priv)) {
		if (dev_priv->psr.psr2_enabled) {
			val = I915_READ(EDP_PSR2_CTL);
			WARN_ON(!(val & EDP_PSR2_ENABLE));
			I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
		} else {
			val = I915_READ(EDP_PSR_CTL);
			WARN_ON(!(val & EDP_PSR_ENABLE));
			I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
		}
	} else {
		val = I915_READ(VLV_PSRCTL(pipe));

		/*
		 * Here we do the transition directly from
		 * PSR_state 3 (active - no Remote Frame Buffer (RFB) update) to
		 * PSR_state 5 (exit).
		 * PSR_state 4 (active with single frame update) can be skipped.
		 * On PSR_state 5 (exit) Hardware is responsible for the
		 * transition back to PSR_state 1 (inactive).
		 * Now we are in the same state as after vlv_psr_enable_source.
		 */
		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
		I915_WRITE(VLV_PSRCTL(pipe), val);

		/*
		 * Send AUX wake up - Spec says after transitioning to PSR
		 * active we have to send AUX wake up by writing 01h in DPCD
		 * 600h of sink device.
		 * XXX: This might slow down the transition, but without this
		 * HW doesn't complete the transition to PSR_state 1 and we
		 * never get the screen updated.
		 */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
				   DP_SET_POWER_D0);
	}

	dev_priv->psr.active = false;
}

/**
 * intel_psr_single_frame_update - Single Frame Update
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Some platforms support a single frame update feature that is used to
 * send and update only one frame on Remote Frame Buffer.
 * So far it is only implemented for Valleyview and Cherryview because
 * hardware requires this to be done before a page flip.
 */
void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
				   unsigned frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;
	u32 val;

	if (!CAN_PSR(dev_priv))
		return;

	/*
	 * Single frame update is already supported on BDW+ but it requires
	 * many W/A and it isn't really needed.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
		val = I915_READ(VLV_PSRCTL(pipe));

		/*
		 * We need to set this bit before writing registers for a flip.
		 * This bit will self-clear when it gets to the PSR active state.
		 */
		I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
	}
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (!CAN_PSR(dev_priv))
		return;

	if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev_priv);

	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (!CAN_PSR(dev_priv))
		return;

	if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition flush = invalidate + flush */
	if (frontbuffer_bits) {
		if (dev_priv->psr.psr2_enabled ||
		    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			intel_psr_exit(dev_priv);
		} else {
			/*
			 * Display WA #0884: all
			 * This documented WA for bxt can be safely applied
			 * broadly so we can force HW tracking to exit PSR
			 * instead of disabling and re-enabling.
			 * The workaround tells us to write 0 to CUR_SURFLIVE_A,
			 * but it makes more sense to write to the currently
			 * active pipe.
			 */
			I915_WRITE(CURSURFLIVE(pipe), 0);
		}
	}

	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
		if (!work_busy(&dev_priv->psr.work.work))
			schedule_delayed_work(&dev_priv->psr.work,
					      msecs_to_jiffies(100));
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev_priv: i915 device private
 *
 * This function is called only once at driver load to initialize basic
 * PSR stuff.
 */
void intel_psr_init(struct drm_i915_private *dev_priv)
{
	if (!HAS_PSR(dev_priv))
		return;

	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;

	if (!dev_priv->psr.sink_support)
		return;

	/* Per platform default: all disabled. */
	if (i915_modparams.enable_psr == -1)
		i915_modparams.enable_psr = 0;

	/* Set link_standby x link_off defaults */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		/* HSW and BDW require workarounds that we don't implement. */
		dev_priv->psr.link_standby = false;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		/* On VLV and CHV only standby mode is supported. */
		dev_priv->psr.link_standby = true;
	else
		/* For new platforms let's respect VBT back again */
		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

	/* Override link_standby x link_off defaults */
	if (i915_modparams.enable_psr == 2 && !dev_priv->psr.link_standby) {
		DRM_DEBUG_KMS("PSR: Forcing link standby\n");
		dev_priv->psr.link_standby = true;
	}
	if (i915_modparams.enable_psr == 3 && dev_priv->psr.link_standby) {
		DRM_DEBUG_KMS("PSR: Forcing main link off\n");
		dev_priv->psr.link_standby = false;
	}

	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
	mutex_init(&dev_priv->psr.lock);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->psr.enable_source = vlv_psr_enable_source;
		dev_priv->psr.disable_source = vlv_psr_disable;
		dev_priv->psr.enable_sink = vlv_psr_enable_sink;
		dev_priv->psr.activate = vlv_psr_activate;
		dev_priv->psr.setup_vsc = vlv_psr_setup_vsc;
	} else {
		dev_priv->psr.has_hw_tracking = true;
		dev_priv->psr.enable_source = hsw_psr_enable_source;
		dev_priv->psr.disable_source = hsw_psr_disable;
		dev_priv->psr.enable_sink = hsw_psr_enable_sink;
		dev_priv->psr.activate = hsw_psr_activate;
		dev_priv->psr.setup_vsc = hsw_psr_setup_vsc;
	}
}