/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Frame Buffer Compression (FBC)
 *
 * FBC tries to save memory bandwidth (and so power consumption) by
 * compressing the amount of memory used by the display. It is totally
 * transparent to user space and completely handled in the kernel.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns. They come from keeping the memory footprint small
 * and having fewer memory pages opened and accessed for refreshing the display.
 *
 * i915 is responsible for reserving stolen memory for FBC and configuring its
 * offset in the proper registers. The hardware takes care of all the
 * compression/decompression. However, there are many known cases where we
 * have to forcibly disable it to allow proper screen updates.
 */

#include <drm/drm_fourcc.h>

#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"

static inline bool fbc_supported(struct drm_i915_private *dev_priv)
{
	return HAS_FBC(dev_priv);
}

/*
 * On some platforms, where the CRTC's x:0/y:0 coordinates don't match the
 * frontbuffer's x:0/y:0 coordinates, we lie to the hardware about the plane's
 * origin so the x and y offsets can actually fit the registers. As a
 * consequence, the fence doesn't really start exactly at the display plane
 * address we program because it starts at the real start of the buffer, so we
 * have to take this into consideration here.
 */
static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
{
	return fbc->state_cache.plane.y - fbc->state_cache.plane.adjusted_y;
}

/*
 * For SKL+, the plane source size used by the hardware is based on the value we
 * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
 * we wrote to PIPESRC.
 */
static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
					    int *width, int *height)
{
	if (width)
		*width = cache->plane.src_w;
	if (height)
		*height = cache->plane.src_h;
}

static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
					struct intel_fbc_state_cache *cache)
{
	int lines;

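	/* Size the CFB only for as many lines as the hardware can compress. */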
	intel_fbc_get_plane_source_size(cache, NULL, &lines);
	if (IS_GEN(dev_priv, 7))
		lines = min(lines, 2048);
	else if (INTEL_GEN(dev_priv) >= 8)
		lines = min(lines, 2560);

	/* Hardware needs the full buffer stride, not just the active area. */
	return lines * cache->fb.stride;
}

static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
				    FBC_STAT_COMPRESSING, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}
}

static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	/* Note: fbc.threshold == 1 for i8xx */
	cfb_pitch = params->cfb_size / FBC_LL_SIZE;
	if (params->fb.stride < cfb_pitch)
		cfb_pitch = params->fb.stride;

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN(dev_priv, 2))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG(i), 0);

	if (IS_GEN(dev_priv, 4)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev_priv))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= params->fence_id;
	I915_WRITE(FBC_CONTROL, fbc_ctl);
}

static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN;
	if (params->fb.format->cpp[0] == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;

	if (params->fence_id >= 0) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id;
		I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
	} else {
		I915_WRITE(DPFC_FENCE_YOFF, 0);
	}

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
}

static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
	}
}

static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

/* This function forces a CFB recompression through the nuke operation. */
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
	POSTING_READ(MSG_FBC_REND_STATE);
}

static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane);
	if (params->fb.format->cpp[0] == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	if (params->fence_id >= 0) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN;
		if (IS_GEN(dev_priv, 5))
			dpfc_ctl |= params->fence_id;
		if (IS_GEN(dev_priv, 6)) {
			I915_WRITE(SNB_DPFC_CTL_SA,
				   SNB_CPU_FENCE_ENABLE |
				   params->fence_id);
			I915_WRITE(DPFC_CPU_FENCE_OFFSET,
				   params->crtc.fence_y_offset);
		}
	} else {
		if (IS_GEN(dev_priv, 6)) {
			I915_WRITE(SNB_DPFC_CTL_SA, 0);
			I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
		}
	}

	I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	intel_fbc_recompress(dev_priv);
}

static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
	}
}

static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	/* Display WA #0529: skl, kbl, bxt. */
	if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) {
		u32 val = I915_READ(CHICKEN_MISC_4);

		val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);

		if (params->gen9_wa_cfb_stride)
			val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;

		I915_WRITE(CHICKEN_MISC_4, val);
	}

	dpfc_ctl = 0;
	if (IS_IVYBRIDGE(dev_priv))
		dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane);

	if (params->fb.format->cpp[0] == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	if (params->fence_id >= 0) {
		dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE |
			   params->fence_id);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
	} else {
		I915_WRITE(SNB_DPFC_CTL_SA, 0);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
	}

	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	if (IS_IVYBRIDGE(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
			   I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
			   HSW_FBCQ_DIS);
	}

	if (INTEL_GEN(dev_priv) >= 11)
		/* Wa_1409120013:icl,ehl,tgl */
		I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	intel_fbc_recompress(dev_priv);
}

static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 5)
		return ilk_fbc_is_active(dev_priv);
	else if (IS_GM45(dev_priv))
		return g4x_fbc_is_active(dev_priv);
	else
		return i8xx_fbc_is_active(dev_priv);
}

static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	fbc->active = true;

	if (INTEL_GEN(dev_priv) >= 7)
		gen7_fbc_activate(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 5)
		ilk_fbc_activate(dev_priv);
	else if (IS_GM45(dev_priv))
		g4x_fbc_activate(dev_priv);
	else
		i8xx_fbc_activate(dev_priv);
}

static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	fbc->active = false;

	if (INTEL_GEN(dev_priv) >= 5)
		ilk_fbc_deactivate(dev_priv);
	else if (IS_GM45(dev_priv))
		g4x_fbc_deactivate(dev_priv);
	else
		i8xx_fbc_deactivate(dev_priv);
}

/**
 * intel_fbc_is_active - Is FBC active?
 * @dev_priv: i915 device instance
 *
 * This function is used to verify the current state of FBC.
 *
 * FIXME: This should be tracked in the plane config eventually
 * instead of queried at runtime for most callers.
 */
bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return dev_priv->fbc.active;
}

static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
				 const char *reason)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	WARN_ON(!mutex_is_locked(&fbc->lock));

	if (fbc->active)
		intel_fbc_hw_deactivate(dev_priv);

	fbc->no_fbc_reason = reason;
}

static int find_compression_threshold(struct drm_i915_private *dev_priv,
				      struct drm_mm_node *node,
				      unsigned int size,
				      unsigned int fb_cpp)
{
	int compression_threshold = 1;
	int ret;
	u64 end;

	/* The FBC hardware for BDW/SKL doesn't have access to the stolen
	 * reserved range size, so it always assumes the maximum (8mb) is used.
	 * If we enable FBC using a CFB on that memory range we'll get FIFO
	 * underruns, even if that range is not reserved by the BIOS. */
	if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
		end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024;
	else
		end = U64_MAX;

	/* HACK: This code depends on what we will do in *_enable_fbc. If that
	 * code changes, this code needs to change as well.
	 *
	 * The enable_fbc code will attempt to use one of our 2 compression
	 * thresholds, therefore, in that case, we only have 1 resort.
	 */

	/* Try to over-allocate to reduce reallocations and fragmentation. */
	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
						   4096, 0, end);
	if (ret == 0)
		return compression_threshold;

again:
	/* HW's ability to limit the CFB is 1:4 */
	if (compression_threshold > 4 ||
	    (fb_cpp == 2 && compression_threshold == 2))
		return 0;

	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
						   4096, 0, end);
	if (ret && INTEL_GEN(dev_priv) <= 4) {
		return 0;
	} else if (ret) {
		compression_threshold <<= 1;
		goto again;
	} else {
		return compression_threshold;
	}
}

static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
			       unsigned int size, unsigned int fb_cpp)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct drm_mm_node *uninitialized_var(compressed_llb);
	int ret;

	WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));

	ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
					 size, fb_cpp);
	if (!ret)
		goto err_llb;
	else if (ret > 1) {
		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");

	}

	fbc->threshold = ret;

	if (INTEL_GEN(dev_priv) >= 5)
		I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
	else if (IS_GM45(dev_priv)) {
		I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
	} else {
		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
		if (!compressed_llb)
			goto err_fb;

		ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
						  4096, 4096);
		if (ret)
			goto err_fb;

		fbc->compressed_llb = compressed_llb;

		GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
					     fbc->compressed_fb.start,
					     U32_MAX));
		GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
					     fbc->compressed_llb->start,
					     U32_MAX));
		I915_WRITE(FBC_CFB_BASE,
			   dev_priv->dsm.start + fbc->compressed_fb.start);
		I915_WRITE(FBC_LL_BASE,
			   dev_priv->dsm.start + compressed_llb->start);
	}

	DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
		      fbc->compressed_fb.size, fbc->threshold);

	return 0;

err_fb:
	kfree(compressed_llb);
	i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
err_llb:
	if (drm_mm_initialized(&dev_priv->mm.stolen))
		pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}

static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (drm_mm_node_allocated(&fbc->compressed_fb))
		i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);

	if (fbc->compressed_llb) {
		i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
		kfree(fbc->compressed_llb);
	}
}

void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	__intel_fbc_cleanup_cfb(dev_priv);
	mutex_unlock(&fbc->lock);
}

static bool stride_is_valid(struct drm_i915_private *dev_priv,
			    unsigned int stride)
{
	/* This should have been caught earlier. */
	if (WARN_ON_ONCE((stride & (64 - 1)) != 0))
		return false;

	/* Below are the additional FBC restrictions. */
	if (stride < 512)
		return false;

	if (IS_GEN(dev_priv, 2) || IS_GEN(dev_priv, 3))
		return stride == 4096 || stride == 8192;

	if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048)
		return false;

	if (stride > 16384)
		return false;

	return true;
}

static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
				  u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
		return true;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_RGB565:
		/* 16bpp not supported on gen2 */
		if (IS_GEN(dev_priv, 2))
			return false;
		/* WaFbcOnly1to1Ratio:ctg */
		if (IS_G4X(dev_priv))
			return false;
		return true;
	default:
		return false;
	}
}

/*
 * For some reason, the hardware tracking starts looking at whatever we
 * programmed as the display plane base address register. It does not look at
 * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
 * variables instead of just looking at the pipe/plane size.
 */
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	unsigned int effective_w, effective_h, max_w, max_h;

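	/* Maximum plane size (in pixels) that the FBC hardware can track. */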
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		max_w = 5120;
		max_h = 4096;
	} else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
		max_w = 4096;
		max_h = 4096;
	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		max_w = 4096;
		max_h = 2048;
	} else {
		max_w = 2048;
		max_h = 1536;
	}

	intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
					&effective_h);
	effective_w += fbc->state_cache.plane.adjusted_x;
	effective_h += fbc->state_cache.plane.adjusted_y;

	return effective_w <= max_w && effective_h <= max_h;
}

static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
					 const struct intel_crtc_state *crtc_state,
					 const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;
	struct drm_framebuffer *fb = plane_state->hw.fb;

	cache->plane.visible = plane_state->uapi.visible;
	if (!cache->plane.visible)
		return;

	cache->crtc.mode_flags = crtc_state->hw.adjusted_mode.flags;
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;

	cache->plane.rotation = plane_state->hw.rotation;
	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	cache->plane.src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
	cache->plane.adjusted_x = plane_state->color_plane[0].x;
	cache->plane.adjusted_y = plane_state->color_plane[0].y;
	cache->plane.y = plane_state->uapi.src.y1 >> 16;

	cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;

	cache->fb.format = fb->format;
	cache->fb.stride = fb->pitches[0];

	WARN_ON(plane_state->flags & PLANE_HAS_FENCE &&
		!plane_state->vma->fence);

	if (plane_state->flags & PLANE_HAS_FENCE &&
	    plane_state->vma->fence)
		cache->fence_id = plane_state->vma->fence->id;
	else
		cache->fence_id = -1;
}

static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	if (!cache->plane.visible) {
		fbc->no_fbc_reason = "primary plane not visible";
		return false;
	}

	/* We don't need to use a state cache here since this information is
	 * global for all CRTCs.
	 */
	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "underrun detected";
		return false;
	}

	if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) {
		fbc->no_fbc_reason = "incompatible mode";
		return false;
	}

	if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
		fbc->no_fbc_reason = "mode too large for compression";
		return false;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 *
	 * Note that it is possible for a tiled surface to be unmappable (and
	 * so have no fence associated with it) due to aperture constraints
	 * at the time of pinning.
	 *
	 * FIXME with 90/270 degree rotation we should use the fence on
	 * the normal GTT view (the rotated view doesn't even have a
	 * fence). Would need changes to the FBC fence Y offset as well.
	 * For now this will effectively disable FBC with 90/270 degree
	 * rotation.
	 */
	if (cache->fence_id < 0) {
		fbc->no_fbc_reason = "framebuffer not tiled or fenced";
		return false;
	}
	if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
	    cache->plane.rotation != DRM_MODE_ROTATE_0) {
		fbc->no_fbc_reason = "rotation unsupported";
		return false;
	}

	if (!stride_is_valid(dev_priv, cache->fb.stride)) {
		fbc->no_fbc_reason = "framebuffer stride not supported";
		return false;
	}

	if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
		fbc->no_fbc_reason = "pixel format is invalid";
		return false;
	}

	if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
	    cache->fb.format->has_alpha) {
		fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
		return false;
	}

	/* WaFbcExceedCdClockThreshold:hsw,bdw */
	if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
	    cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
		fbc->no_fbc_reason = "pixel rate is too big";
		return false;
	}

	/* It is possible for the required CFB size to change without a
	 * crtc->disable + crtc->enable since it is possible to change the
	 * stride without triggering a full modeset. Since we try to
	 * over-allocate the CFB, there's a chance we may keep FBC enabled even
	 * if this happens, but if we exceed the current CFB size we'll have to
	 * disable FBC. Notice that it would be possible to disable FBC, wait
	 * for a frame, free the stolen node, then try to reenable FBC in case
	 * we didn't get any invalidate/deactivate calls, but this would require
	 * a lot of tracking just for a specific case. If we conclude it's an
	 * important case, we can implement it later. */
	if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
	    fbc->compressed_fb.size * fbc->threshold) {
		fbc->no_fbc_reason = "CFB requirements changed";
		return false;
	}

	/*
	 * Work around a problem on GEN9+ HW, where enabling FBC on a plane
	 * having a Y offset that isn't divisible by 4 causes FIFO underrun
	 * and screen flicker.
	 */
	if (IS_GEN_RANGE(dev_priv, 9, 10) &&
	    (fbc->state_cache.plane.adjusted_y & 3)) {
		fbc->no_fbc_reason = "plane Y offset is misaligned";
		return false;
	}

	return true;
}

static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (intel_vgpu_active(dev_priv)) {
		fbc->no_fbc_reason = "VGPU is active";
		return false;
	}

	if (!i915_modparams.enable_fbc) {
		fbc->no_fbc_reason = "disabled per module param or by default";
		return false;
	}

	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "underrun detected";
		return false;
	}

	return true;
}

static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
				     struct intel_fbc_reg_params *params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	/* Since all our fields are integer types, use memset here so the
	 * comparison function can rely on memcmp because the padding will be
	 * zero. */
	memset(params, 0, sizeof(*params));

	params->fence_id = cache->fence_id;

	params->crtc.pipe = crtc->pipe;
	params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
	params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);

	params->fb.format = cache->fb.format;
	params->fb.stride = cache->fb.stride;

	params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);

	params->gen9_wa_cfb_stride = cache->gen9_wa_cfb_stride;

	params->plane_visible = cache->plane.visible;
}

void intel_fbc_pre_update(struct intel_crtc *crtc,
			  const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	const char *reason = "update pending";

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);

	if (!fbc->enabled || fbc->crtc != crtc)
		goto unlock;

	intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
	fbc->flip_pending = true;

	intel_fbc_deactivate(dev_priv, reason);
unlock:
	mutex_unlock(&fbc->lock);
}

/**
 * __intel_fbc_disable - disable FBC
 * @dev_priv: i915 device instance
 *
 * This is the low level function that actually disables FBC. Callers should
 * grab the FBC lock.
 */
static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_crtc *crtc = fbc->crtc;

	WARN_ON(!mutex_is_locked(&fbc->lock));
	WARN_ON(!fbc->enabled);
	WARN_ON(fbc->active);

	DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));

	__intel_fbc_cleanup_cfb(dev_priv);

	fbc->enabled = false;
	fbc->crtc = NULL;
}

static void __intel_fbc_post_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	WARN_ON(!mutex_is_locked(&fbc->lock));

	if (!fbc->enabled || fbc->crtc != crtc)
		return;

	fbc->flip_pending = false;
	WARN_ON(fbc->active);

	if (!i915_modparams.enable_fbc) {
		intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
		__intel_fbc_disable(dev_priv);

		return;
	}

	intel_fbc_get_reg_params(crtc, &fbc->params);

	if (!intel_fbc_can_activate(crtc))
		return;

	if (!fbc->busy_bits) {
		intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
		intel_fbc_hw_activate(dev_priv);
	} else
		intel_fbc_deactivate(dev_priv, "frontbuffer write");
}

void intel_fbc_post_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	__intel_fbc_post_update(crtc);
	mutex_unlock(&fbc->lock);
}

static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
{
	if (fbc->enabled)
		return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
	else
		return fbc->possible_framebuffer_bits;
}

void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits,
			  enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

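	/*
	 * GTT writes are caught by the hardware tracking itself and flips are
	 * handled by the pre/post update hooks, so neither needs to deactivate
	 * FBC from here.
	 */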
	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		return;

	mutex_lock(&fbc->lock);

	fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;

	if (fbc->enabled && fbc->busy_bits)
		intel_fbc_deactivate(dev_priv, "frontbuffer write");

	mutex_unlock(&fbc->lock);
}

void intel_fbc_flush(struct drm_i915_private *dev_priv,
		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

P
956 957
		return;

958
	mutex_lock(&fbc->lock);
959

960
	fbc->busy_bits &= ~frontbuffer_bits;
961

962 963 964
	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		goto out;

	if (!fbc->busy_bits && fbc->enabled &&
	    (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
		if (fbc->active)
			intel_fbc_recompress(dev_priv);
		else if (!fbc->flip_pending)
			__intel_fbc_post_update(fbc->crtc);
	}

out:
	mutex_unlock(&fbc->lock);
}

/**
 * intel_fbc_choose_crtc - select a CRTC to enable FBC on
 * @dev_priv: i915 device instance
 * @state: the atomic state structure
 *
 * This function looks at the proposed state for CRTCs and planes, then chooses
 * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
 * true.
 *
 * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe
 * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
 */
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
			   struct intel_atomic_state *state)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	bool crtc_chosen = false;
	int i;

	mutex_lock(&fbc->lock);

	/* Does this atomic commit involve the CRTC currently tied to FBC? */
	if (fbc->crtc &&
	    !intel_atomic_get_new_crtc_state(state, fbc->crtc))
		goto out;

	if (!intel_fbc_can_enable(dev_priv))
		goto out;

	/* Simply choose the first CRTC that is compatible and has a visible
	 * plane. We could go for fancier schemes such as checking the plane
	 * size, but this would just affect the few platforms that don't tie FBC
	 * to pipe or plane A. */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);

		if (!plane->has_fbc)
			continue;

		if (!plane_state->uapi.visible)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		crtc_state->enable_fbc = true;
		crtc_chosen = true;
		break;
	}

	if (!crtc_chosen)
		fbc->no_fbc_reason = "no suitable CRTC for FBC";

out:
	mutex_unlock(&fbc->lock);
}

/**
 * intel_fbc_enable: tries to enable FBC on the CRTC
 * @crtc: the CRTC
 * @crtc_state: corresponding &drm_crtc_state for @crtc
 * @plane_state: corresponding &drm_plane_state for the primary plane of @crtc
 *
 * This function checks if the given CRTC was chosen for FBC, then enables it if
 * possible. Notice that it doesn't activate FBC. It is valid to call
 * intel_fbc_enable multiple times for the same pipe without an
 * intel_fbc_disable in the middle, as long as it is deactivated.
 */
void intel_fbc_enable(struct intel_crtc *crtc,
		      const struct intel_crtc_state *crtc_state,
		      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);

	if (fbc->enabled) {
		WARN_ON(fbc->crtc == NULL);
		if (fbc->crtc == crtc) {
			WARN_ON(!crtc_state->enable_fbc);
			WARN_ON(fbc->active);
		}
		goto out;
	}

	if (!crtc_state->enable_fbc)
		goto out;

	WARN_ON(fbc->active);
	WARN_ON(fbc->crtc != NULL);

	intel_fbc_update_state_cache(crtc, crtc_state, plane_state);

	/* FIXME crtc_state->enable_fbc lies :( */
	if (!cache->plane.visible)
		goto out;

	if (intel_fbc_alloc_cfb(dev_priv,
				intel_fbc_calculate_cfb_size(dev_priv, cache),
				fb->format->cpp[0])) {
		fbc->no_fbc_reason = "not enough stolen memory";
		goto out;
	}

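	/*
	 * Display WA #0529: gen9 (except GLK) needs the CFB stride overridden
	 * for non X-tiled framebuffers; the value computed here is written to
	 * CHICKEN_MISC_4 by gen7_fbc_activate().
	 */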
	if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv) &&
	    fb->modifier != I915_FORMAT_MOD_X_TILED)
		cache->gen9_wa_cfb_stride =
			DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8;
	else
		cache->gen9_wa_cfb_stride = 0;

	DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
	fbc->no_fbc_reason = "FBC enabled but not active yet\n";

	fbc->enabled = true;
	fbc->crtc = crtc;
out:
	mutex_unlock(&fbc->lock);
}

/**
 * intel_fbc_disable - disable FBC if it's associated with crtc
 * @crtc: the CRTC
 *
 * This function disables FBC if it's associated with the provided CRTC.
 */
void intel_fbc_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	if (fbc->crtc == crtc)
		__intel_fbc_disable(dev_priv);
	mutex_unlock(&fbc->lock);
}

/**
 * intel_fbc_global_disable - globally disable FBC
 * @dev_priv: i915 device instance
 *
 * This function disables FBC regardless of which CRTC is associated with it.
 */
void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	if (fbc->enabled) {
		WARN_ON(fbc->crtc->active);
		__intel_fbc_disable(dev_priv);
	}
	mutex_unlock(&fbc->lock);
}

static void intel_fbc_underrun_work_fn(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, fbc.underrun_work);
	struct intel_fbc *fbc = &dev_priv->fbc;

	mutex_lock(&fbc->lock);

	/* Maybe we were scheduled twice. */
	if (fbc->underrun_detected || !fbc->enabled)
		goto out;

	DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
	fbc->underrun_detected = true;

	intel_fbc_deactivate(dev_priv, "FIFO underrun");
out:
	mutex_unlock(&fbc->lock);
}

/*
 * intel_fbc_reset_underrun - reset FBC fifo underrun status.
 * @dev_priv: i915 device instance
 *
 * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
 * want to re-enable FBC after an underrun to increase test coverage.
 */
int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
{
	int ret;

	cancel_work_sync(&dev_priv->fbc.underrun_work);

	ret = mutex_lock_interruptible(&dev_priv->fbc.lock);
	if (ret)
		return ret;

	if (dev_priv->fbc.underrun_detected) {
		DRM_DEBUG_KMS("Re-allowing FBC after fifo underrun\n");
		dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared";
	}

	dev_priv->fbc.underrun_detected = false;
	mutex_unlock(&dev_priv->fbc.lock);

	return 0;
}

/**
 * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
 * @dev_priv: i915 device instance
 *
 * Without FBC, most underruns are harmless and don't really cause too many
 * problems, except for an annoying message on dmesg. With FBC, underruns can
 * become black screens or even worse, especially when paired with bad
 * watermarks. So in order for us to be on the safe side, completely disable FBC
 * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
 * already suggests that watermarks may be bad, so try to be as safe as
 * possible.
 *
 * This function is called from the IRQ handler.
 */
void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	/* There's no guarantee that underrun_detected won't be set to true
	 * right after this check and before the work is scheduled, but that's
	 * not a problem since we'll check it again under the work function
	 * while FBC is locked. This check here is just to prevent us from
	 * unnecessarily scheduling the work, and it relies on the fact that we
	 * never switch underrun_detect back to false after it's true. */
	if (READ_ONCE(fbc->underrun_detected))
		return;

	schedule_work(&fbc->underrun_work);
}

/*
 * The DDX driver changes its behavior depending on the value it reads from
 * i915.enable_fbc, so sanitize it by translating the default value into either
 * 0 or 1 in order to allow it to know what's going on.
 *
 * Notice that this is done at driver initialization and we still allow user
 * space to change the value during runtime without sanitizing it again. IGT
 * relies on being able to change i915.enable_fbc at runtime.
 */
static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
{
	if (i915_modparams.enable_fbc >= 0)
		return !!i915_modparams.enable_fbc;

	if (!HAS_FBC(dev_priv))
		return 0;

	/* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		return 0;

	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
		return 1;

	return 0;
}

static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
{
	/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
	if (intel_vtd_active() &&
	    (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
		DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
		return true;
	}

	return false;
}

/**
 * intel_fbc_init - Initialize FBC
 * @dev_priv: the i915 device
 *
 * This function might be called during PM init process.
 */
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
	mutex_init(&fbc->lock);
	fbc->enabled = false;
	fbc->active = false;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		mkwrite_device_info(dev_priv)->display.has_fbc = false;

	if (need_fbc_vtd_wa(dev_priv))
		mkwrite_device_info(dev_priv)->display.has_fbc = false;

	i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
	DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
		      i915_modparams.enable_fbc);

	if (!HAS_FBC(dev_priv)) {
		fbc->no_fbc_reason = "unsupported by this chipset";
		return;
	}

	/* This value was pulled out of someone's hat */
	if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);

	/* We still don't have any sort of hardware state readout for FBC, so
	 * deactivate it in case the BIOS activated it to make sure software
	 * matches the hardware state. */
	if (intel_fbc_hw_is_active(dev_priv))
		intel_fbc_hw_deactivate(dev_priv);
}