intel_psr.c 34.2 KB
Newer Older
R
Rodrigo Vivi 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

R
Rodrigo Vivi 已提交
24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53
/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell Display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to PSR
 * spec in eDP1.3. PSR feature allows the display to go to lower standby states
 * when system is idle but display is on as it eliminates display refresh
 * request to DDR memory completely as long as the frame buffer for that
 * display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */

R
Rodrigo Vivi 已提交
54 55 56 57 58
#include <drm/drmP.h>

#include "intel_drv.h"
#include "i915_drv.h"

59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95
/*
 * Map an intel_dp to the power domain required for PSR AUX IO.
 *
 * CNL HW requires the corresponding AUX IO to be powered up for PSR.
 * However, for non-A AUX ports the corresponding non-EDP transcoders
 * would have already enabled power well 2 and DC_OFF, so grabbing the
 * wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a specific
 * AUX_IO reference does not power up any extra wells.  Note that PSR
 * is enabled only on port A even though this mapping is correct for
 * the other ports too.
 */
static inline enum intel_display_power_domain
psr_aux_domain(struct intel_dp *intel_dp)
{
	if (intel_dp->aux_ch == AUX_CH_A)
		return POWER_DOMAIN_AUX_IO_A;

	return intel_dp->aux_power_domain;
}

/* Grab the AUX IO power domain needed for PSR (GEN10+/CNL only). */
static void psr_aux_io_power_get(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);

	if (INTEL_GEN(dev_priv) >= 10)
		intel_display_power_get(dev_priv, psr_aux_domain(intel_dp));
}

/* Release the AUX IO power domain taken by psr_aux_io_power_get(). */
static void psr_aux_io_power_put(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);

	if (INTEL_GEN(dev_priv) >= 10)
		intel_display_power_put(dev_priv, psr_aux_domain(intel_dp));
}

96
static bool intel_dp_get_y_coord_required(struct intel_dp *intel_dp)
97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124
{
	uint8_t psr_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1)
		return false;
	return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
}

/* Query DPCD: does the sink support the VSC SDP extension for colorimetry? */
static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
	uint8_t dprx = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
			      &dprx) != 1)
		return false;

	return (dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED) != 0;
}

/* Query DPCD: is the sink capable of ALPM (Aux-Less Power Management)? */
static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	uint8_t alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;

	return (alpm_caps & DP_ALPM_CAP) != 0;
}

125 126 127 128 129 130 131 132 133 134 135 136
/*
 * Read the sink's resynchronization latency (in frames) from DPCD.
 * Returns 0 (and logs an error) when the read fails.
 */
static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	u8 val = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		return val & DP_MAX_RESYNC_FRAME_COUNT_MASK;

	DRM_ERROR("Unable to get sink synchronization latency\n");
	return val;
}

137 138 139 140 141 142 143 144
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

145
	if (intel_dp->psr_dpcd[0]) {
146 147 148 149 150
		dev_priv->psr.sink_support = true;
		DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
	}

	if (INTEL_GEN(dev_priv) >= 9 &&
151 152 153 154 155 156 157 158 159 160 161 162
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		/*
		 * All panels that supports PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way panel is capable to do selective update
		 * without a aux frame sync.
		 *
		 * To support PSR version 02h and PSR version 03h without
		 * Y-coordinate requirement panels we would need to enable
		 * GTC first.
		 */
163 164 165 166
		dev_priv->psr.sink_psr2_support =
				intel_dp_get_y_coord_required(intel_dp);
		DRM_DEBUG_KMS("PSR2 %s on sink", dev_priv->psr.sink_psr2_support
			      ? "supported" : "not supported");
167

168
		if (dev_priv->psr.sink_psr2_support) {
169 170 171 172
			dev_priv->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
			dev_priv->psr.alpm =
				intel_dp_get_alpm_status(intel_dp);
173 174
			dev_priv->psr.sink_sync_latency =
				intel_dp_get_sink_sync_latency(intel_dp);
175 176 177 178
		}
	}
}

179 180
static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
{
181
	struct drm_i915_private *dev_priv = to_i915(dev);
182 183 184 185 186 187 188 189
	uint32_t val;

	val = I915_READ(VLV_PSRSTAT(pipe)) &
	      VLV_EDP_PSR_CURR_STATE_MASK;
	return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
	       (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
}

190 191
static void vlv_psr_setup_vsc(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
192
{
193 194
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
195 196 197
	uint32_t val;

	/* VLV auto-generate VSC package as per EDP 1.3 spec, Table 3.10 */
198
	val  = I915_READ(VLV_VSCSDP(crtc->pipe));
199 200
	val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
	val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
201
	I915_WRITE(VLV_VSCSDP(crtc->pipe), val);
202 203
}

204 205
static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
206
{
207
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
208 209
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
	struct edp_vsc_psr psr_vsc;
210

211
	if (dev_priv->psr.psr2_enabled) {
212 213 214 215
		/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
216
		if (dev_priv->psr.colorimetry_support) {
217 218
			psr_vsc.sdp_header.HB2 = 0x5;
			psr_vsc.sdp_header.HB3 = 0x13;
219
		} else {
220 221 222
			psr_vsc.sdp_header.HB2 = 0x4;
			psr_vsc.sdp_header.HB3 = 0xe;
		}
223
	} else {
224 225 226 227 228 229
		/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
		memset(&psr_vsc, 0, sizeof(psr_vsc));
		psr_vsc.sdp_header.HB0 = 0;
		psr_vsc.sdp_header.HB1 = 0x7;
		psr_vsc.sdp_header.HB2 = 0x2;
		psr_vsc.sdp_header.HB3 = 0x8;
230 231
	}

232 233
	intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state,
					DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
234 235
}

236 237 238
static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
{
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
239
			   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
240 241
}

242
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
R
Rodrigo Vivi 已提交
243 244
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
245 246 247
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	u32 aux_clock_divider, aux_ctl;
	int i;
R
Rodrigo Vivi 已提交
248 249 250 251 252 253 254
	static const uint8_t aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
255 256 257 258
	u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
			   EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
			   EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
			   EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
R
Rodrigo Vivi 已提交
259 260

	BUILD_BUG_ON(sizeof(aux_msg) > 20);
261
	for (i = 0; i < sizeof(aux_msg); i += 4)
262
		I915_WRITE(EDP_PSR_AUX_DATA(i >> 2),
263 264
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

265 266 267
	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
268 269
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
					     aux_clock_divider);
270 271 272 273

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= psr_aux_mask;
	I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
274 275 276 277 278 279 280
}

static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
281
	u8 dpcd_val = DP_PSR_ENABLE;
282

283
	/* Enable ALPM at sink for psr2 */
284
	if (dev_priv->psr.psr2_enabled && dev_priv->psr.alpm)
285 286 287
		drm_dp_dpcd_writeb(&intel_dp->aux,
				DP_RECEIVER_ALPM_CONFIG,
				DP_ALPM_ENABLE);
288 289 290

	if (dev_priv->psr.psr2_enabled)
		dpcd_val |= DP_PSR_ENABLE_PSR2;
291
	if (dev_priv->psr.link_standby)
292 293
		dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
294

295
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
R
Rodrigo Vivi 已提交
296 297
}

298 299
static void vlv_psr_enable_source(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
300 301
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
302 303
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
304

305
	/* Transition from PSR_state 0 (disabled) to PSR_state 1 (inactive) */
306
	I915_WRITE(VLV_PSRCTL(crtc->pipe),
307 308 309 310 311
		   VLV_EDP_PSR_MODE_SW_TIMER |
		   VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
		   VLV_EDP_PSR_ENABLE);
}

312 313 314 315
static void vlv_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
316
	struct drm_i915_private *dev_priv = to_i915(dev);
317 318 319
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

320 321 322 323 324
	/*
	 * Let's do the transition from PSR_state 1 (inactive) to
	 * PSR_state 2 (transition to active - static frame transmission).
	 * Then Hardware is responsible for the transition to
	 * PSR_state 3 (active - no Remote Frame Buffer (RFB) update).
325 326 327 328 329
	 */
	I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
		   VLV_EDP_PSR_ACTIVE_ENTRY);
}

R
Rodrigo Vivi 已提交
330
static void hsw_activate_psr1(struct intel_dp *intel_dp)
R
Rodrigo Vivi 已提交
331 332 333
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
334
	struct drm_i915_private *dev_priv = to_i915(dev);
335

R
Rodrigo Vivi 已提交
336
	uint32_t max_sleep_time = 0x1f;
337 338 339 340 341 342
	/*
	 * Let's respect VBT in case VBT asks a higher idle_frame value.
	 * Let's use 6 as the minimum to cover all known cases including
	 * the off-by-one issue that HW has in some cases. Also there are
	 * cases where sink should be able to train
	 * with the 5 or 6 idle patterns.
343
	 */
344
	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
345 346 347 348
	uint32_t val = EDP_PSR_ENABLE;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
349

350
	if (IS_HASWELL(dev_priv))
351
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
R
Rodrigo Vivi 已提交
352

353 354 355
	if (dev_priv->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379
	if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
		val |= EDP_PSR_TP1_TIME_2500us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
		val |= EDP_PSR_TP1_TIME_500us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
		val |= EDP_PSR_TP1_TIME_100us;
	else
		val |= EDP_PSR_TP1_TIME_0us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
		val |= EDP_PSR_TP2_TP3_TIME_2500us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_0us;

	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

380
	val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
381
	I915_WRITE(EDP_PSR_CTL, val);
382
}
383

R
Rodrigo Vivi 已提交
384
static void hsw_activate_psr2(struct intel_dp *intel_dp)
385 386 387 388 389 390 391 392 393 394 395 396
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	/*
	 * Let's respect VBT in case VBT asks a higher idle_frame value.
	 * Let's use 6 as the minimum to cover all known cases including
	 * the off-by-one issue that HW has in some cases. Also there are
	 * cases where sink should be able to train
	 * with the 5 or 6 idle patterns.
	 */
	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
397
	u32 val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
398 399 400 401

	/* FIXME: selective update is probably totally broken because it doesn't
	 * mesh at all with our frontbuffer tracking. And the hw alone isn't
	 * good enough. */
402 403 404 405
	val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		val |= EDP_Y_COORDINATE_VALID | EDP_Y_COORDINATE_ENABLE;
	}
406

407
	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
408 409 410 411 412 413 414 415 416

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
		val |= EDP_PSR2_TP2_TIME_2500;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
		val |= EDP_PSR2_TP2_TIME_500;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
		val |= EDP_PSR2_TP2_TIME_100;
	else
		val |= EDP_PSR2_TP2_TIME_50;
417

418
	I915_WRITE(EDP_PSR2_CTL, val);
R
Rodrigo Vivi 已提交
419 420
}

R
Rodrigo Vivi 已提交
421
static void hsw_psr_activate(struct intel_dp *intel_dp)
422 423 424 425 426
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

R
Rodrigo Vivi 已提交
427 428 429 430 431
	/* On HSW+ after we enable PSR on source it will activate it
	 * as soon as it match configure idle_frame count. So
	 * we just actually enable it here on activation time.
	 */

432
	/* psr1 and psr2 are mutually exclusive.*/
433
	if (dev_priv->psr.psr2_enabled)
R
Rodrigo Vivi 已提交
434
		hsw_activate_psr2(intel_dp);
435
	else
R
Rodrigo Vivi 已提交
436
		hsw_activate_psr1(intel_dp);
437 438
}

439 440 441 442 443
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
				    struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
444 445 446
	int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay;
	int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
	int psr_max_h = 0, psr_max_v = 0;
447 448 449 450 451 452

	/*
	 * FIXME psr2_support is messed up. It's both computed
	 * dynamically during PSR enable, and extracted from sink
	 * caps during eDP detection.
	 */
453
	if (!dev_priv->psr.sink_psr2_support)
454 455
		return false;

456 457 458 459 460 461 462 463 464 465 466 467
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		psr_max_h = 4096;
		psr_max_v = 2304;
	} else if (IS_GEN9(dev_priv)) {
		psr_max_h = 3640;
		psr_max_v = 2304;
	}

	if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
		DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
			      crtc_hdisplay, crtc_vdisplay,
			      psr_max_h, psr_max_v);
468 469 470 471 472 473
		return false;
	}

	return true;
}

474 475
void intel_psr_compute_config(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
R
Rodrigo Vivi 已提交
476 477
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
478
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
479
	const struct drm_display_mode *adjusted_mode =
480
		&crtc_state->base.adjusted_mode;
481
	int psr_setup_time;
R
Rodrigo Vivi 已提交
482

483
	if (!CAN_PSR(dev_priv))
484 485 486 487 488 489
		return;

	if (!i915_modparams.enable_psr) {
		DRM_DEBUG_KMS("PSR disable by flag\n");
		return;
	}
R
Rodrigo Vivi 已提交
490

491 492 493 494 495 496 497
	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms with DDI implementation of PSR have different
	 * PSR registers per transcoder and we only implement transcoder EDP
	 * ones. Since by Display design transcoder EDP is tied to port A
	 * we can safely escape based on the port A.
	 */
498
	if (HAS_DDI(dev_priv) && dig_port->base.port != PORT_A) {
499
		DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
500
		return;
R
Rodrigo Vivi 已提交
501 502
	}

503
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
504 505
	    !dev_priv->psr.link_standby) {
		DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
506
		return;
507 508
	}

509
	if (IS_HASWELL(dev_priv) &&
510
	    I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
511
		      S3D_ENABLE) {
R
Rodrigo Vivi 已提交
512
		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
513
		return;
R
Rodrigo Vivi 已提交
514 515
	}

516
	if (IS_HASWELL(dev_priv) &&
517
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
R
Rodrigo Vivi 已提交
518
		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
519
		return;
R
Rodrigo Vivi 已提交
520 521
	}

522 523 524 525
	psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
	if (psr_setup_time < 0) {
		DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
			      intel_dp->psr_dpcd[1]);
526
		return;
527 528 529 530 531 532
	}

	if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
	    adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
		DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
			      psr_setup_time);
533 534 535
		return;
	}

536 537 538 539 540
	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		DRM_DEBUG_KMS("PSR condition failed: panel lacks power state control\n");
		return;
	}

541
	crtc_state->has_psr = true;
542 543
	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
	DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
R
Rodrigo Vivi 已提交
544 545
}

546
static void intel_psr_activate(struct intel_dp *intel_dp)
R
Rodrigo Vivi 已提交
547 548 549
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
550
	struct drm_i915_private *dev_priv = to_i915(dev);
R
Rodrigo Vivi 已提交
551

552
	if (dev_priv->psr.psr2_enabled)
553 554 555
		WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
	else
		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
R
Rodrigo Vivi 已提交
556 557 558
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

R
Rodrigo Vivi 已提交
559
	dev_priv->psr.activate(intel_dp);
R
Rodrigo Vivi 已提交
560 561 562
	dev_priv->psr.active = true;
}

563 564 565 566 567 568 569 570
static void hsw_psr_enable_source(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

571 572
	psr_aux_io_power_get(intel_dp);

573 574 575 576 577 578
	/* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+
	 * use hardcoded values PSR AUX transactions
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_psr_setup_aux(intel_dp);

579
	if (dev_priv->psr.psr2_enabled) {
580 581 582 583 584 585 586 587
		u32 chicken = I915_READ(CHICKEN_TRANS(cpu_transcoder));

		if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv))
			chicken |= (PSR2_VSC_ENABLE_PROG_HEADER
				   | PSR2_ADD_VERTICAL_LINE_COUNT);

		else
			chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL;
588 589
		I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);

590
		I915_WRITE(EDP_PSR_DEBUG,
591 592 593 594 595 596 597 598 599 600 601 602 603
			   EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD |
			   EDP_PSR_DEBUG_MASK_LPSP |
			   EDP_PSR_DEBUG_MASK_MAX_SLEEP |
			   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
	} else {
		/*
		 * Per Spec: Avoid continuous PSR exit by masking MEMUP
		 * and HPD. also mask LPSP to avoid dependency on other
		 * drivers that might block runtime_pm besides
		 * preventing  other hw tracking issues now we can rely
		 * on frontbuffer tracking.
		 */
604
		I915_WRITE(EDP_PSR_DEBUG,
605 606 607 608 609 610
			   EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD |
			   EDP_PSR_DEBUG_MASK_LPSP);
	}
}

R
Rodrigo Vivi 已提交
611 612 613
/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
614
 * @crtc_state: new CRTC state
R
Rodrigo Vivi 已提交
615 616 617
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
618 619
void intel_psr_enable(struct intel_dp *intel_dp,
		      const struct intel_crtc_state *crtc_state)
R
Rodrigo Vivi 已提交
620 621 622
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
623
	struct drm_i915_private *dev_priv = to_i915(dev);
R
Rodrigo Vivi 已提交
624

625
	if (!crtc_state->has_psr)
R
Rodrigo Vivi 已提交
626 627
		return;

628 629 630
	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

631
	WARN_ON(dev_priv->drrs.dp);
R
Rodrigo Vivi 已提交
632 633 634 635 636 637
	mutex_lock(&dev_priv->psr.lock);
	if (dev_priv->psr.enabled) {
		DRM_DEBUG_KMS("PSR already in use\n");
		goto unlock;
	}

638
	dev_priv->psr.psr2_enabled = crtc_state->has_psr2;
R
Rodrigo Vivi 已提交
639 640
	dev_priv->psr.busy_frontbuffer_bits = 0;

641
	dev_priv->psr.setup_vsc(intel_dp, crtc_state);
642
	dev_priv->psr.enable_sink(intel_dp);
643
	dev_priv->psr.enable_source(intel_dp, crtc_state);
644 645 646 647 648 649 650 651 652 653 654 655 656 657 658
	dev_priv->psr.enabled = intel_dp;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_psr_activate(intel_dp);
	} else {
		/*
		 * FIXME: Activation should happen immediately since this
		 * function is just called after pipe is fully trained and
		 * enabled.
		 * However on some platforms we face issues when first
		 * activation follows a modeset so quickly.
		 *     - On VLV/CHV we get bank screen on first activation
		 *     - On HSW/BDW we get a recoverable frozen screen until
		 *       next exit-activate sequence.
		 */
659 660
		schedule_delayed_work(&dev_priv->psr.work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
661
	}
662

R
Rodrigo Vivi 已提交
663 664 665 666
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

667 668
static void vlv_psr_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
R
Rodrigo Vivi 已提交
669 670 671
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
672
	struct drm_i915_private *dev_priv = to_i915(dev);
673
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
674
	uint32_t val;
R
Rodrigo Vivi 已提交
675

676
	if (dev_priv->psr.active) {
677
		/* Put VLV PSR back to PSR_state 0 (disabled). */
678
		if (intel_wait_for_register(dev_priv,
679
					    VLV_PSRSTAT(crtc->pipe),
680 681 682
					    VLV_EDP_PSR_IN_TRANS,
					    0,
					    1))
683 684
			WARN(1, "PSR transition took longer than expected\n");

685
		val = I915_READ(VLV_PSRCTL(crtc->pipe));
686 687 688
		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
		val &= ~VLV_EDP_PSR_ENABLE;
		val &= ~VLV_EDP_PSR_MODE_MASK;
689
		I915_WRITE(VLV_PSRCTL(crtc->pipe), val);
690 691 692

		dev_priv->psr.active = false;
	} else {
693
		WARN_ON(vlv_is_psr_active_on_pipe(dev, crtc->pipe));
R
Rodrigo Vivi 已提交
694
	}
695 696
}

697 698
static void hsw_psr_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
699 700 701
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
702
	struct drm_i915_private *dev_priv = to_i915(dev);
R
Rodrigo Vivi 已提交
703 704

	if (dev_priv->psr.active) {
705
		i915_reg_t psr_status;
706 707
		u32 psr_status_mask;

708
		if (dev_priv->psr.psr2_enabled) {
709
			psr_status = EDP_PSR2_STATUS;
710 711
			psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;

712 713
			I915_WRITE(EDP_PSR2_CTL,
				   I915_READ(EDP_PSR2_CTL) &
714 715
				   ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));

716
		} else {
717
			psr_status = EDP_PSR_STATUS;
718 719
			psr_status_mask = EDP_PSR_STATUS_STATE_MASK;

720 721
			I915_WRITE(EDP_PSR_CTL,
				   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
722
		}
723 724 725

		/* Wait till PSR is idle */
		if (intel_wait_for_register(dev_priv,
726
					    psr_status, psr_status_mask, 0,
727 728 729
					    2000))
			DRM_ERROR("Timed out waiting for PSR Idle State\n");

R
Rodrigo Vivi 已提交
730 731
		dev_priv->psr.active = false;
	} else {
732
		if (dev_priv->psr.psr2_enabled)
733 734 735
			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
		else
			WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
R
Rodrigo Vivi 已提交
736
	}
737 738

	psr_aux_io_power_put(intel_dp);
739 740 741 742 743
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
744
 * @old_crtc_state: old CRTC state
745 746 747
 *
 * This function needs to be called before disabling pipe.
 */
748 749
void intel_psr_disable(struct intel_dp *intel_dp,
		       const struct intel_crtc_state *old_crtc_state)
750 751 752
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
753
	struct drm_i915_private *dev_priv = to_i915(dev);
754

755
	if (!old_crtc_state->has_psr)
756 757
		return;

758 759 760
	if (WARN_ON(!CAN_PSR(dev_priv)))
		return;

761 762 763 764 765 766
	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

767
	dev_priv->psr.disable_source(intel_dp, old_crtc_state);
R
Rodrigo Vivi 已提交
768

769 770 771
	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

R
Rodrigo Vivi 已提交
772 773 774 775 776 777 778 779 780 781 782
	dev_priv->psr.enabled = NULL;
	mutex_unlock(&dev_priv->psr.lock);

	cancel_delayed_work_sync(&dev_priv->psr.work);
}

static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work.work);
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
783 784
	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
R
Rodrigo Vivi 已提交
785 786 787 788 789 790

	/* We have to make sure PSR is ready for re-enable
	 * otherwise it keeps disabled until next full enable/disable cycle.
	 * PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
791
	if (HAS_DDI(dev_priv)) {
792
		if (dev_priv->psr.psr2_enabled) {
793
			if (intel_wait_for_register(dev_priv,
794 795 796 797
						    EDP_PSR2_STATUS,
						    EDP_PSR2_STATUS_STATE_MASK,
						    0,
						    50)) {
798 799 800 801 802
				DRM_ERROR("Timed out waiting for PSR2 Idle for re-enable\n");
				return;
			}
		} else {
			if (intel_wait_for_register(dev_priv,
803 804 805 806
						    EDP_PSR_STATUS,
						    EDP_PSR_STATUS_STATE_MASK,
						    0,
						    50)) {
807 808 809
				DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
				return;
			}
810 811
		}
	} else {
812 813 814 815 816
		if (intel_wait_for_register(dev_priv,
					    VLV_PSRSTAT(pipe),
					    VLV_EDP_PSR_IN_TRANS,
					    0,
					    1)) {
817 818 819
			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
			return;
		}
R
Rodrigo Vivi 已提交
820 821 822 823 824 825 826 827 828 829 830 831 832 833 834
	}
	mutex_lock(&dev_priv->psr.lock);
	intel_dp = dev_priv->psr.enabled;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits)
		goto unlock;

835
	intel_psr_activate(intel_dp);
R
Rodrigo Vivi 已提交
836 837 838 839
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

840
static void intel_psr_exit(struct drm_i915_private *dev_priv)
R
Rodrigo Vivi 已提交
841
{
842 843 844 845
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	u32 val;
R
Rodrigo Vivi 已提交
846

847 848 849
	if (!dev_priv->psr.active)
		return;

850
	if (HAS_DDI(dev_priv)) {
851
		if (dev_priv->psr.psr2_enabled) {
852 853 854 855 856 857 858 859
			val = I915_READ(EDP_PSR2_CTL);
			WARN_ON(!(val & EDP_PSR2_ENABLE));
			I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
		} else {
			val = I915_READ(EDP_PSR_CTL);
			WARN_ON(!(val & EDP_PSR_ENABLE));
			I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
		}
860 861 862
	} else {
		val = I915_READ(VLV_PSRCTL(pipe));

863 864 865 866 867 868 869 870
		/*
		 * Here we do the transition drirectly from
		 * PSR_state 3 (active - no Remote Frame Buffer (RFB) update) to
		 * PSR_state 5 (exit).
		 * PSR State 4 (active with single frame update) can be skipped.
		 * On PSR_state 5 (exit) Hardware is responsible to transition
		 * back to PSR_state 1 (inactive).
		 * Now we are at Same state after vlv_psr_enable_source.
871 872 873 874
		 */
		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
		I915_WRITE(VLV_PSRCTL(pipe), val);

875 876
		/*
		 * Send AUX wake up - Spec says after transitioning to PSR
877 878 879 880 881 882 883 884
		 * active we have to send AUX wake up by writing 01h in DPCD
		 * 600h of sink device.
		 * XXX: This might slow down the transition, but without this
		 * HW doesn't complete the transition to PSR_state 1 and we
		 * never get the screen updated.
		 */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
				   DP_SET_POWER_D0);
R
Rodrigo Vivi 已提交
885 886
	}

887
	dev_priv->psr.active = false;
R
Rodrigo Vivi 已提交
888 889
}

890 891
/**
 * intel_psr_single_frame_update - Single Frame Update
892
 * @dev_priv: i915 device
893
 * @frontbuffer_bits: frontbuffer plane tracking bits
894 895 896 897 898 899
 *
 * Some platforms support a single frame update feature that is used to
 * send and update only one frame on Remote Frame Buffer.
 * So far it is only implemented for Valleyview and Cherryview because
 * hardware requires this to be done before a page flip.
 */
900
void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
901
				   unsigned frontbuffer_bits)
902 903 904 905 906
{
	struct drm_crtc *crtc;
	enum pipe pipe;
	u32 val;

907
	if (!CAN_PSR(dev_priv))
908 909
		return;

910 911 912 913
	/*
	 * Single frame update is already supported on BDW+ but it requires
	 * many W/A and it isn't really needed.
	 */
914
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
915 916 917 918 919 920 921 922 923 924 925
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

926 927
	if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
		val = I915_READ(VLV_PSRCTL(pipe));
928

929 930 931 932 933 934
		/*
		 * We need to set this bit before writing registers for a flip.
		 * This bit will be self-clear when it gets to the PSR active state.
		 */
		I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
	}
935 936 937
	mutex_unlock(&dev_priv->psr.lock);
}

R
Rodrigo Vivi 已提交
938 939
/**
 * intel_psr_invalidate - Invalidade PSR
940
 * @dev_priv: i915 device
R
Rodrigo Vivi 已提交
941
 * @frontbuffer_bits: frontbuffer plane tracking bits
942
 * @origin: which operation caused the invalidate
R
Rodrigo Vivi 已提交
943 944 945 946 947 948 949 950
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits."
 */
951
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
952
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
R
Rodrigo Vivi 已提交
953 954 955 956
{
	struct drm_crtc *crtc;
	enum pipe pipe;

957
	if (!CAN_PSR(dev_priv))
958 959
		return;

960 961 962
	if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
		return;

R
Rodrigo Vivi 已提交
963 964 965 966 967 968 969 970 971 972 973
	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
974 975

	if (frontbuffer_bits)
976
		intel_psr_exit(dev_priv);
977

R
Rodrigo Vivi 已提交
978 979 980
	mutex_unlock(&dev_priv->psr.lock);
}

R
Rodrigo Vivi 已提交
981 982
/**
 * intel_psr_flush - Flush PSR
983
 * @dev_priv: i915 device
R
Rodrigo Vivi 已提交
984
 * @frontbuffer_bits: frontbuffer plane tracking bits
985
 * @origin: which operation caused the flush
R
Rodrigo Vivi 已提交
986 987 988 989 990 991 992 993
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
994
void intel_psr_flush(struct drm_i915_private *dev_priv,
995
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
R
Rodrigo Vivi 已提交
996 997 998 999
{
	struct drm_crtc *crtc;
	enum pipe pipe;

1000
	if (!CAN_PSR(dev_priv))
1001 1002
		return;

1003 1004 1005
	if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
		return;

R
Rodrigo Vivi 已提交
1006 1007 1008 1009 1010 1011 1012 1013
	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;
1014 1015

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
R
Rodrigo Vivi 已提交
1016 1017
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

1018
	/* By definition flush = invalidate + flush */
1019
	if (frontbuffer_bits) {
1020
		if (dev_priv->psr.psr2_enabled ||
1021 1022 1023 1024 1025 1026 1027 1028
		    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			intel_psr_exit(dev_priv);
		} else {
			/*
			 * Display WA #0884: all
			 * This documented WA for bxt can be safely applied
			 * broadly so we can force HW tracking to exit PSR
			 * instead of disabling and re-enabling.
1029
			 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
1030 1031 1032
			 * but it makes more sense write to the current active
			 * pipe.
			 */
1033
			I915_WRITE(CURSURFLIVE(pipe), 0);
1034 1035
		}
	}
1036

R
Rodrigo Vivi 已提交
1037
	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
1038 1039
		if (!work_busy(&dev_priv->psr.work.work))
			schedule_delayed_work(&dev_priv->psr.work,
1040
					      msecs_to_jiffies(100));
R
Rodrigo Vivi 已提交
1041 1042 1043
	mutex_unlock(&dev_priv->psr.lock);
}

R
Rodrigo Vivi 已提交
1044 1045
/**
 * intel_psr_init - Init basic PSR work and mutex.
1046
 * @dev_priv: i915 device private
R
Rodrigo Vivi 已提交
1047 1048 1049 1050
 *
 * This function is  called only once at driver load to initialize basic
 * PSR stuff.
 */
1051
void intel_psr_init(struct drm_i915_private *dev_priv)
R
Rodrigo Vivi 已提交
1052
{
1053 1054 1055
	if (!HAS_PSR(dev_priv))
		return;

1056 1057 1058
	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;

1059 1060 1061
	if (!dev_priv->psr.sink_support)
		return;

1062
	/* Per platform default: all disabled. */
1063 1064
	if (i915_modparams.enable_psr == -1)
		i915_modparams.enable_psr = 0;
1065

1066
	/* Set link_standby x link_off defaults */
1067
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1068 1069
		/* HSW and BDW require workarounds that we don't implement. */
		dev_priv->psr.link_standby = false;
1070
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1071 1072 1073 1074 1075 1076
		/* On VLV and CHV only standby mode is supported. */
		dev_priv->psr.link_standby = true;
	else
		/* For new platforms let's respect VBT back again */
		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;

1077
	/* Override link_standby x link_off defaults */
1078
	if (i915_modparams.enable_psr == 2 && !dev_priv->psr.link_standby) {
1079 1080 1081
		DRM_DEBUG_KMS("PSR: Forcing link standby\n");
		dev_priv->psr.link_standby = true;
	}
1082
	if (i915_modparams.enable_psr == 3 && dev_priv->psr.link_standby) {
1083 1084 1085 1086
		DRM_DEBUG_KMS("PSR: Forcing main link off\n");
		dev_priv->psr.link_standby = false;
	}

R
Rodrigo Vivi 已提交
1087 1088
	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
	mutex_init(&dev_priv->psr.lock);
1089 1090

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1091
		dev_priv->psr.enable_source = vlv_psr_enable_source;
1092
		dev_priv->psr.disable_source = vlv_psr_disable;
1093
		dev_priv->psr.enable_sink = vlv_psr_enable_sink;
R
Rodrigo Vivi 已提交
1094
		dev_priv->psr.activate = vlv_psr_activate;
1095
		dev_priv->psr.setup_vsc = vlv_psr_setup_vsc;
1096
	} else {
1097
		dev_priv->psr.has_hw_tracking = true;
1098
		dev_priv->psr.enable_source = hsw_psr_enable_source;
1099
		dev_priv->psr.disable_source = hsw_psr_disable;
1100
		dev_priv->psr.enable_sink = hsw_psr_enable_sink;
R
Rodrigo Vivi 已提交
1101
		dev_priv->psr.activate = hsw_psr_activate;
1102
		dev_priv->psr.setup_vsc = hsw_psr_setup_vsc;
1103
	}
R
Rodrigo Vivi 已提交
1104
}