intel_fbc.c 35.9 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

R
Rodrigo Vivi 已提交
24 25 26 27 28 29
/**
 * DOC: Frame Buffer Compression (FBC)
 *
 * FBC tries to save memory bandwidth (and so power consumption) by
 * compressing the amount of memory used by the display. It is totally
 * transparent to user space and completely handled in the kernel.
30 31
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
R
Rodrigo Vivi 已提交
32 33
 * variation-less patterns. It comes from keeping the memory footprint small
 * and having fewer memory pages opened and accessed for refreshing the display.
34
 *
R
Rodrigo Vivi 已提交
35 36 37 38
 * i915 is responsible for reserving stolen memory for FBC and configuring its
 * offset on proper registers. The hardware takes care of all
 * compress/decompress. However there are many known cases where we have to
 * forcibly disable it to allow proper screen updates.
39 40
 */

41 42
#include <drm/drm_fourcc.h>

R
Rodrigo Vivi 已提交
43
#include "i915_drv.h"
44
#include "intel_display_types.h"
45
#include "intel_fbc.h"
46
#include "intel_frontbuffer.h"
R
Rodrigo Vivi 已提交
47

P
Paulo Zanoni 已提交
48 49
/* Does this device have FBC hardware at all? */
static inline bool fbc_supported(struct drm_i915_private *dev_priv)
{
	const bool has_fbc = HAS_FBC(dev_priv);

	return has_fbc;
}

53 54 55 56 57 58 59 60
/*
 * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
 * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
 * origin so the x and y offsets can actually fit the registers. As a
 * consequence, the fence doesn't really start exactly at the display plane
 * address we program because it starts at the real start of the buffer, so we
 * have to take this into consideration here.
 */
static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
{
	/* Lines between the programmed plane origin and the real buffer
	 * origin, both taken from the cached FBC plane state. */
	return fbc->state_cache.plane.y - fbc->state_cache.plane.adjusted_y;
}

66 67 68 69 70
/*
 * For SKL+, the plane source size used by the hardware is based on the value we
 * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
 * we wrote to PIPESRC.
 *
 * Either output pointer may be NULL when the caller only needs one dimension.
 */
static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
					    int *width, int *height)
{
	if (width)
		*width = cache->plane.src_w;
	if (height)
		*height = cache->plane.src_h;
}

80 81
/*
 * Compute the compressed framebuffer (CFB) size in bytes for the cached
 * plane state. The tracked line count is capped per generation: 2048 on
 * gen7, 2560 on gen8+.
 */
static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
					struct intel_fbc_state_cache *cache)
{
	int lines;

	intel_fbc_get_plane_source_size(cache, NULL, &lines);
	if (IS_GEN(dev_priv, 7))
		lines = min(lines, 2048);
	else if (INTEL_GEN(dev_priv) >= 8)
		lines = min(lines, 2560);

	/* Hardware needs the full buffer stride, not just the active area. */
	return lines * cache->fb.stride;
}

95
/* Disable FBC on pre-g4x (8xx-style) hardware and wait for it to go idle. */
static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
				    FBC_STAT_COMPRESSING, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}
}

115
/* Program and enable FBC on pre-g4x (8xx-style) hardware. */
static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	/* Note: fbc.threshold == 1 for i8xx */
	cfb_pitch = params->cfb_size / FBC_LL_SIZE;
	if (params->fb.stride < cfb_pitch)
		cfb_pitch = params->fb.stride;

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN(dev_priv, 2))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG(i), 0);

	if (IS_GEN(dev_priv, 4)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	/* Preserve only the interval field of the previous value. */
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev_priv))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= params->vma->fence->id;
	I915_WRITE(FBC_CONTROL, fbc_ctl);
}

158
static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
159 160 161 162
{
	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

163
/* Program and enable FBC on g4x (GM45) hardware. */
static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN;
	/* cpp == 2 (16bpp) uses the 2x limit, everything else 1x. */
	if (params->fb.format->cpp[0] == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;

	if (params->flags & PLANE_HAS_FENCE) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
		I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
	} else {
		I915_WRITE(DPFC_FENCE_YOFF, 0);
	}

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
}

185
/* Clear the g4x DPFC enable bit if it is currently set. */
static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
	}
}

197
static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
198 199 200 201
{
	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

202 203
/* This function forces a CFB recompression through the nuke operation. */
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
	/* Posting read to flush the nuke write out to the hardware. */
	POSTING_READ(MSG_FBC_REND_STATE);
}

209
/* Program and enable FBC on ILK/SNB (gen5/6) hardware. */
static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane);
	/* 16bpp needs the next higher compression limit. */
	if (params->fb.format->cpp[0] == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	if (params->flags & PLANE_HAS_FENCE) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN;
		/* ILK takes the fence id in the control register itself... */
		if (IS_GEN(dev_priv, 5))
			dpfc_ctl |= params->vma->fence->id;
		/* ...SNB programs it through a separate register pair. */
		if (IS_GEN(dev_priv, 6)) {
			I915_WRITE(SNB_DPFC_CTL_SA,
				   SNB_CPU_FENCE_ENABLE |
				   params->vma->fence->id);
			I915_WRITE(DPFC_CPU_FENCE_OFFSET,
				   params->crtc.fence_y_offset);
		}
	} else {
		if (IS_GEN(dev_priv, 6)) {
			I915_WRITE(SNB_DPFC_CTL_SA, 0);
			I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
		}
	}

	I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	intel_fbc_recompress(dev_priv);
}

257
/* Clear the ILK-style DPFC enable bit if it is currently set. */
static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
	}
}

269
static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
270 271 272 273
{
	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

274
/* Program and enable FBC on IVB and newer (gen7+) hardware. */
static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	/* Display WA #0529: skl, kbl, bxt. */
	if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) {
		u32 val = I915_READ(CHICKEN_MISC_4);

		val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);

		/* Non-X-tiled buffers need the stride override computed in
		 * intel_fbc_get_reg_params(). */
		if (i915_gem_object_get_tiling(params->vma->obj) !=
		    I915_TILING_X)
			val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;

		I915_WRITE(CHICKEN_MISC_4, val);
	}

	dpfc_ctl = 0;
	if (IS_IVYBRIDGE(dev_priv))
		dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane);

	/* 16bpp needs the next higher compression limit. */
	if (params->fb.format->cpp[0] == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	if (params->flags & PLANE_HAS_FENCE) {
		dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE |
			   params->vma->fence->id);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
	} else {
		I915_WRITE(SNB_DPFC_CTL_SA, 0);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
	}

	/* debugfs-controlled visualization of compressed segments */
	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	if (IS_IVYBRIDGE(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
			   I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
			   HSW_FBCQ_DIS);
	}

	if (INTEL_GEN(dev_priv) >= 11)
		/* Wa_1409120013:icl,ehl,tgl */
		I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	intel_fbc_recompress(dev_priv);
}

348 349
/* Query the per-generation hardware FBC enable bit. */
static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 5)
		return ilk_fbc_is_active(dev_priv);
	else if (IS_GM45(dev_priv))
		return g4x_fbc_is_active(dev_priv);
	else
		return i8xx_fbc_is_active(dev_priv);
}

/* Mark FBC active and program the per-generation hardware. */
static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	fbc->active = true;

	if (INTEL_GEN(dev_priv) >= 7)
		gen7_fbc_activate(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 5)
		ilk_fbc_activate(dev_priv);
	else if (IS_GM45(dev_priv))
		g4x_fbc_activate(dev_priv);
	else
		i8xx_fbc_activate(dev_priv);
}

/*
 * Mark FBC inactive and disable the per-generation hardware. gen7+ shares
 * the ILK_DPFC_CONTROL register (see gen7_fbc_activate()), so the ILK
 * deactivate path covers all of gen5+.
 */
static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	fbc->active = false;

	if (INTEL_GEN(dev_priv) >= 5)
		ilk_fbc_deactivate(dev_priv);
	else if (IS_GM45(dev_priv))
		g4x_fbc_deactivate(dev_priv);
	else
		i8xx_fbc_deactivate(dev_priv);
}

R
Rodrigo Vivi 已提交
388
/**
389
 * intel_fbc_is_active - Is FBC active?
390
 * @dev_priv: i915 device instance
R
Rodrigo Vivi 已提交
391 392
 *
 * This function is used to verify the current state of FBC.
D
Daniel Vetter 已提交
393
 *
R
Rodrigo Vivi 已提交
394
 * FIXME: This should be tracked in the plane config eventually
D
Daniel Vetter 已提交
395
 * instead of queried at runtime for most callers.
R
Rodrigo Vivi 已提交
396
 */
397
bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
398
{
399
	return dev_priv->fbc.active;
400 401
}

402 403
/*
 * Stop hardware compression (without disabling FBC as a whole) and record
 * @reason in fbc->no_fbc_reason. Caller must hold fbc->lock.
 */
static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
				 const char *reason)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	WARN_ON(!mutex_is_locked(&fbc->lock));

	if (fbc->active)
		intel_fbc_hw_deactivate(dev_priv);

	fbc->no_fbc_reason = reason;
}

415
/*
 * Try to reserve a stolen-memory node for the CFB, retrying with smaller
 * sizes and an increasing compression threshold until something fits.
 *
 * Returns the threshold that worked (1, 2 or 4), or 0 on failure. Note
 * that @size is deliberately modified in place by the shift side effects
 * embedded in the allocation calls below.
 */
static int find_compression_threshold(struct drm_i915_private *dev_priv,
				      struct drm_mm_node *node,
				      int size,
				      int fb_cpp)
{
	int compression_threshold = 1;
	int ret;
	u64 end;

	/* The FBC hardware for BDW/SKL doesn't have access to the stolen
	 * reserved range size, so it always assumes the maximum (8mb) is used.
	 * If we enable FBC using a CFB on that memory range we'll get FIFO
	 * underruns, even if that range is not reserved by the BIOS. */
	if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
		end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024;
	else
		end = U64_MAX;

	/* HACK: This code depends on what we will do in *_enable_fbc. If that
	 * code changes, this code needs to change as well.
	 *
	 * The enable_fbc code will attempt to use one of our 2 compression
	 * thresholds, therefore, in that case, we only have 1 resort.
	 */

	/* Try to over-allocate to reduce reallocations and fragmentation. */
	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
						   4096, 0, end);
	if (ret == 0)
		return compression_threshold;

again:
	/* HW's ability to limit the CFB is 1:4 */
	if (compression_threshold > 4 ||
	    (fb_cpp == 2 && compression_threshold == 2))
		return 0;

	/* Halve the size and retry with a doubled threshold. */
	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
						   4096, 0, end);
	if (ret && INTEL_GEN(dev_priv) <= 4) {
		return 0;
	} else if (ret) {
		compression_threshold <<= 1;
		goto again;
	} else {
		return compression_threshold;
	}
}

464
/*
 * Allocate the compressed framebuffer (CFB) from stolen memory and point
 * the hardware at it. Pre-g4x hardware additionally needs a separate
 * line-length buffer (LLB). Returns 0 on success, -ENOSPC on failure.
 */
static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct drm_mm_node *uninitialized_var(compressed_llb);
	int size, fb_cpp, ret;

	WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));

	size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
	fb_cpp = fbc->state_cache.fb.format->cpp[0];

	ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
					 size, fb_cpp);
	if (!ret)
		goto err_llb;
	else if (ret > 1) {
		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
	}

	fbc->threshold = ret;

	if (INTEL_GEN(dev_priv) >= 5)
		I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
	else if (IS_GM45(dev_priv)) {
		I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
	} else {
		/* Pre-g4x: also allocate and program the line-length buffer. */
		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
		if (!compressed_llb)
			goto err_fb;

		ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
						  4096, 4096);
		if (ret)
			goto err_fb;

		fbc->compressed_llb = compressed_llb;

		/* Both base registers are 32 bit, so the offsets must fit. */
		GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
					     fbc->compressed_fb.start,
					     U32_MAX));
		GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
					     fbc->compressed_llb->start,
					     U32_MAX));
		I915_WRITE(FBC_CFB_BASE,
			   dev_priv->dsm.start + fbc->compressed_fb.start);
		I915_WRITE(FBC_LL_BASE,
			   dev_priv->dsm.start + compressed_llb->start);
	}

	DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
		      fbc->compressed_fb.size, fbc->threshold);

	return 0;

err_fb:
	kfree(compressed_llb);
	i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
err_llb:
	if (drm_mm_initialized(&dev_priv->mm.stolen))
		pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}

529
static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
530
{
531 532 533 534 535 536 537 538
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (drm_mm_node_allocated(&fbc->compressed_fb))
		i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);

	if (fbc->compressed_llb) {
		i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
		kfree(fbc->compressed_llb);
539 540 541
	}
}

542
/*
 * intel_fbc_cleanup_cfb - free the FBC compressed buffer
 * @dev_priv: i915 device instance
 *
 * Locked wrapper around __intel_fbc_cleanup_cfb().
 */
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	__intel_fbc_cleanup_cfb(dev_priv);
	mutex_unlock(&fbc->lock);
}

554 555 556
/* Check whether the framebuffer stride is usable by FBC on this platform. */
static bool stride_is_valid(struct drm_i915_private *dev_priv,
			    unsigned int stride)
{
	/* This should have been caught earlier. */
	if (WARN_ON_ONCE((stride & (64 - 1)) != 0))
		return false;

	/* Below are the additional FBC restrictions. */
	if (stride < 512)
		return false;

	/* gen2/3 only accept two fixed strides. */
	if (IS_GEN(dev_priv, 2) || IS_GEN(dev_priv, 3))
		return stride == 4096 || stride == 8192;

	if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048)
		return false;

	if (stride > 16384)
		return false;

	return true;
}

577
/* Check whether the framebuffer pixel format is usable by FBC. */
static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
				  u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
		return true;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_RGB565:
		/* 16bpp not supported on gen2 */
		if (IS_GEN(dev_priv, 2))
			return false;
		/* WaFbcOnly1to1Ratio:ctg */
		if (IS_G4X(dev_priv))
			return false;
		return true;
	default:
		return false;
	}
}

598 599 600 601 602 603 604
/*
 * For some reason, the hardware tracking starts looking at whatever we
 * programmed as the display plane base address register. It does not look at
 * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
 * variables instead of just looking at the pipe/plane size.
 */
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	unsigned int effective_w, effective_h, max_w, max_h;

	/* Per-generation limits on the plane size the FBC hw can track. */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		max_w = 5120;
		max_h = 4096;
	} else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
		max_w = 4096;
		max_h = 4096;
	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		max_w = 4096;
		max_h = 2048;
	} else {
		max_w = 2048;
		max_h = 1536;
	}

	/*
	 * NOTE(review): intel_fbc_get_plane_source_size() takes int * while
	 * effective_w/h are unsigned int — confirm the pointer types agree.
	 */
	intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
					&effective_h);
	/* Account for the lied-about plane origin (see comment above). */
	effective_w += fbc->state_cache.plane.adjusted_x;
	effective_h += fbc->state_cache.plane.adjusted_y;

	return effective_w <= max_w && effective_h <= max_h;
}

632
/*
 * Snapshot everything FBC needs from the new crtc/plane atomic state into
 * fbc->state_cache, so later checks don't have to touch the atomic state.
 */
static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
					 const struct intel_crtc_state *crtc_state,
					 const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;
	struct drm_framebuffer *fb = plane_state->hw.fb;

	cache->vma = NULL;
	cache->flags = 0;

	cache->crtc.mode_flags = crtc_state->hw.adjusted_mode.flags;
	/* Only needed for the WaFbcExceedCdClockThreshold check. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;

	cache->plane.rotation = plane_state->hw.rotation;
	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	cache->plane.src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
	cache->plane.visible = plane_state->uapi.visible;
	cache->plane.adjusted_x = plane_state->color_plane[0].x;
	cache->plane.adjusted_y = plane_state->color_plane[0].y;
	cache->plane.y = plane_state->uapi.src.y1 >> 16;

	cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;

	/* The remaining fields are only meaningful for visible planes. */
	if (!cache->plane.visible)
		return;

	cache->fb.format = fb->format;
	cache->fb.stride = fb->pitches[0];

	cache->vma = plane_state->vma;
	cache->flags = plane_state->flags;
	/* A fence flag without an actual fence must not reach the hw setup. */
	if (WARN_ON(cache->flags & PLANE_HAS_FENCE && !cache->vma->fence))
		cache->flags &= ~PLANE_HAS_FENCE;
}

/*
 * Decide whether FBC can actually be activated for the state currently in
 * fbc->state_cache. Records the failure reason in fbc->no_fbc_reason.
 */
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	/* We don't need to use a state cache here since this information is
	 * global for all CRTC.
	 */
	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "underrun detected";
		return false;
	}

	if (!cache->vma) {
		fbc->no_fbc_reason = "primary plane not visible";
		return false;
	}

	if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) {
		fbc->no_fbc_reason = "incompatible mode";
		return false;
	}

	if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
		fbc->no_fbc_reason = "mode too large for compression";
		return false;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 *
	 * Note that is possible for a tiled surface to be unmappable (and
	 * so have no fence associated with it) due to aperture constaints
	 * at the time of pinning.
	 *
	 * FIXME with 90/270 degree rotation we should use the fence on
	 * the normal GTT view (the rotated view doesn't even have a
	 * fence). Would need changes to the FBC fence Y offset as well.
	 * For now this will effecively disable FBC with 90/270 degree
	 * rotation.
	 */
	if (!(cache->flags & PLANE_HAS_FENCE)) {
		fbc->no_fbc_reason = "framebuffer not tiled or fenced";
		return false;
	}
	if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
	    cache->plane.rotation != DRM_MODE_ROTATE_0) {
		fbc->no_fbc_reason = "rotation unsupported";
		return false;
	}

	if (!stride_is_valid(dev_priv, cache->fb.stride)) {
		fbc->no_fbc_reason = "framebuffer stride not supported";
		return false;
	}

	if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
		fbc->no_fbc_reason = "pixel format is invalid";
		return false;
	}

	if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
	    cache->fb.format->has_alpha) {
		fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
		return false;
	}

	/* WaFbcExceedCdClockThreshold:hsw,bdw */
	if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
	    cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
		fbc->no_fbc_reason = "pixel rate is too big";
		return false;
	}

	/* It is possible for the required CFB size change without a
	 * crtc->disable + crtc->enable since it is possible to change the
	 * stride without triggering a full modeset. Since we try to
	 * over-allocate the CFB, there's a chance we may keep FBC enabled even
	 * if this happens, but if we exceed the current CFB size we'll have to
	 * disable FBC. Notice that it would be possible to disable FBC, wait
	 * for a frame, free the stolen node, then try to reenable FBC in case
	 * we didn't get any invalidate/deactivate calls, but this would require
	 * a lot of tracking just for a specific case. If we conclude it's an
	 * important case, we can implement it later. */
	if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
	    fbc->compressed_fb.size * fbc->threshold) {
		fbc->no_fbc_reason = "CFB requirements changed";
		return false;
	}

	/*
	 * Work around a problem on GEN9+ HW, where enabling FBC on a plane
	 * having a Y offset that isn't divisible by 4 causes FIFO underrun
	 * and screen flicker.
	 */
	if (IS_GEN_RANGE(dev_priv, 9, 10) &&
	    (fbc->state_cache.plane.adjusted_y & 3)) {
		fbc->no_fbc_reason = "plane Y offset is misaligned";
		return false;
	}

	return true;
}

780
/* Check the global (non-CRTC-specific) conditions for enabling FBC. */
static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (intel_vgpu_active(dev_priv)) {
		fbc->no_fbc_reason = "VGPU is active";
		return false;
	}

	if (!i915_modparams.enable_fbc) {
		fbc->no_fbc_reason = "disabled per module param or by default";
		return false;
	}

	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "underrun detected";
		return false;
	}

	return true;
}

802 803 804
/*
 * Snapshot the values that will actually be written to the hardware
 * registers from the state cache into @params.
 */
static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
				     struct intel_fbc_reg_params *params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	/* Since all our fields are integer types, use memset here so the
	 * comparison function can rely on memcmp because the padding will be
	 * zero. */
	memset(params, 0, sizeof(*params));

	params->vma = cache->vma;
	params->flags = cache->flags;

	params->crtc.pipe = crtc->pipe;
	params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
	params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);

	params->fb.format = cache->fb.format;
	params->fb.stride = cache->fb.stride;

	params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);

	/* Pre-compute the stride for Display WA #0529 (gen7_fbc_activate()). */
	if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
		params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w,
						32 * fbc->threshold) * 8;
}

831
/*
 * intel_fbc_pre_update - deactivate FBC before a plane update
 * @crtc: the CRTC being updated
 * @crtc_state: the new CRTC state
 * @plane_state: the new primary plane state
 *
 * Caches the new state and deactivates FBC; reactivation happens in
 * intel_fbc_post_update() once the update has completed.
 */
void intel_fbc_pre_update(struct intel_crtc *crtc,
			  const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	const char *reason = "update pending";

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);

	/* Only the CRTC that currently owns FBC is affected. */
	if (!fbc->enabled || fbc->crtc != crtc)
		goto unlock;

	intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
	fbc->flip_pending = true;

	intel_fbc_deactivate(dev_priv, reason);
unlock:
	mutex_unlock(&fbc->lock);
}

855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878
/**
 * __intel_fbc_disable - disable FBC
 * @dev_priv: i915 device instance
 *
 * This is the low level function that actually disables FBC. Callers should
 * grab the FBC lock.
 */
static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_crtc *crtc = fbc->crtc;

	WARN_ON(!mutex_is_locked(&fbc->lock));
	WARN_ON(!fbc->enabled);
	WARN_ON(fbc->active);

	DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));

	/* Give the CFB stolen memory back. */
	__intel_fbc_cleanup_cfb(dev_priv);

	fbc->enabled = false;
	fbc->crtc = NULL;
}

879
/*
 * Re-evaluate and possibly reactivate FBC after a plane update has
 * completed. Caller must hold fbc->lock.
 */
static void __intel_fbc_post_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	WARN_ON(!mutex_is_locked(&fbc->lock));

	if (!fbc->enabled || fbc->crtc != crtc)
		return;

	fbc->flip_pending = false;
	WARN_ON(fbc->active);

	/* The module param may have been flipped at runtime. */
	if (!i915_modparams.enable_fbc) {
		intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
		__intel_fbc_disable(dev_priv);

		return;
	}

	intel_fbc_get_reg_params(crtc, &fbc->params);

	if (!intel_fbc_can_activate(crtc))
		return;

	/* Only activate while no frontbuffer writes are outstanding. */
	if (!fbc->busy_bits) {
		intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
		intel_fbc_hw_activate(dev_priv);
	} else
		intel_fbc_deactivate(dev_priv, "frontbuffer write");
}

911
/*
 * intel_fbc_post_update - locked wrapper around __intel_fbc_post_update()
 * @crtc: the CRTC that finished updating
 */
void intel_fbc_post_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	__intel_fbc_post_update(crtc);
	mutex_unlock(&fbc->lock);
}

924 925 926 927 928 929 930 931
/*
 * Frontbuffer bits FBC cares about: the bit of the plane FBC is enabled
 * on, or every possible bit while FBC is not enabled anywhere.
 */
static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
{
	if (!fbc->enabled)
		return fbc->possible_framebuffer_bits;

	return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
}

932 933 934 935
/*
 * intel_fbc_invalidate - note pending frontbuffer writes
 * @dev_priv: i915 device instance
 * @frontbuffer_bits: the frontbuffer bits being written
 * @origin: where the write comes from
 *
 * Marks the relevant frontbuffer bits busy and deactivates FBC while
 * they remain busy.
 */
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits,
			  enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	/* These origins don't mark the frontbuffer busy for FBC. */
	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		return;

	mutex_lock(&fbc->lock);

	fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;

	if (fbc->enabled && fbc->busy_bits)
		intel_fbc_deactivate(dev_priv, "frontbuffer write");

	mutex_unlock(&fbc->lock);
}

/*
 * intel_fbc_flush - frontbuffer writes finished
 * @dev_priv: i915 device instance
 * @frontbuffer_bits: the frontbuffer bits that were flushed
 * @origin: where the writes came from
 *
 * Clears the busy bits and either recompresses (if FBC is active) or
 * tries to reactivate FBC via __intel_fbc_post_update().
 */
void intel_fbc_flush(struct drm_i915_private *dev_priv,
		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);

	fbc->busy_bits &= ~frontbuffer_bits;

	/* These origins were never tracked as busy in the first place. */
	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		goto out;

	if (!fbc->busy_bits && fbc->enabled &&
	    (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
		if (fbc->active)
			intel_fbc_recompress(dev_priv);
		else if (!fbc->flip_pending)
			__intel_fbc_post_update(fbc->crtc);
	}

out:
	mutex_unlock(&fbc->lock);
}

981 982 983 984 985 986 987 988 989 990 991 992 993
/**
 * intel_fbc_choose_crtc - select a CRTC to enable FBC on
 * @dev_priv: i915 device instance
 * @state: the atomic state structure
 *
 * This function looks at the proposed state for CRTCs and planes, then chooses
 * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
 * true.
 *
 * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe
 * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
 */
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
			   struct intel_atomic_state *state)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	bool crtc_chosen = false;
	int i;

	mutex_lock(&fbc->lock);

	/* Does this atomic commit involve the CRTC currently tied to FBC? */
	if (fbc->crtc &&
	    !intel_atomic_get_new_crtc_state(state, fbc->crtc))
		goto out;

	if (!intel_fbc_can_enable(dev_priv))
		goto out;

	/* Simply choose the first CRTC that is compatible and has a visible
	 * plane. We could go for fancier schemes such as checking the plane
	 * size, but this would just affect the few platforms that don't tie FBC
	 * to pipe or plane A. */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);

		if (!plane->has_fbc)
			continue;

		if (!plane_state->uapi.visible)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		/* Only one pipe gets FBC; stop at the first suitable plane. */
		crtc_state->enable_fbc = true;
		crtc_chosen = true;
		break;
	}

	if (!crtc_chosen)
		fbc->no_fbc_reason = "no suitable CRTC for FBC";

out:
	mutex_unlock(&fbc->lock);
}

1040 1041 1042
/**
 * intel_fbc_enable: tries to enable FBC on the CRTC
 * @crtc: the CRTC
1043 1044
 * @crtc_state: corresponding &drm_crtc_state for @crtc
 * @plane_state: corresponding &drm_plane_state for the primary plane of @crtc
1045
 *
1046
 * This function checks if the given CRTC was chosen for FBC, then enables it if
1047 1048 1049
 * possible. Notice that it doesn't activate FBC. It is valid to call
 * intel_fbc_enable multiple times for the same pipe without an
 * intel_fbc_disable in the middle, as long as it is deactivated.
1050
 */
1051
void intel_fbc_enable(struct intel_crtc *crtc,
1052 1053
		      const struct intel_crtc_state *crtc_state,
		      const struct intel_plane_state *plane_state)
1054
{
1055
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1056
	struct intel_fbc *fbc = &dev_priv->fbc;
1057 1058 1059 1060

	if (!fbc_supported(dev_priv))
		return;

1061
	mutex_lock(&fbc->lock);
1062

1063
	if (fbc->enabled) {
1064 1065
		WARN_ON(fbc->crtc == NULL);
		if (fbc->crtc == crtc) {
1066
			WARN_ON(!crtc_state->enable_fbc);
1067 1068
			WARN_ON(fbc->active);
		}
1069 1070 1071
		goto out;
	}

1072
	if (!crtc_state->enable_fbc)
1073 1074
		goto out;

1075 1076
	WARN_ON(fbc->active);
	WARN_ON(fbc->crtc != NULL);
1077

1078
	intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
1079
	if (intel_fbc_alloc_cfb(crtc)) {
1080
		fbc->no_fbc_reason = "not enough stolen memory";
1081 1082 1083
		goto out;
	}

1084
	DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
1085
	fbc->no_fbc_reason = "FBC enabled but not active yet\n";
1086

1087 1088
	fbc->enabled = true;
	fbc->crtc = crtc;
1089
out:
1090
	mutex_unlock(&fbc->lock);
1091 1092 1093
}

/**
1094
 * intel_fbc_disable - disable FBC if it's associated with crtc
1095 1096 1097 1098
 * @crtc: the CRTC
 *
 * This function disables FBC if it's associated with the provided CRTC.
 */
1099
void intel_fbc_disable(struct intel_crtc *crtc)
1100
{
1101
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1102
	struct intel_fbc *fbc = &dev_priv->fbc;
1103 1104 1105 1106

	if (!fbc_supported(dev_priv))
		return;

1107
	mutex_lock(&fbc->lock);
1108
	if (fbc->crtc == crtc)
1109
		__intel_fbc_disable(dev_priv);
1110
	mutex_unlock(&fbc->lock);
1111 1112 1113
}

/**
1114
 * intel_fbc_global_disable - globally disable FBC
1115 1116 1117 1118
 * @dev_priv: i915 device instance
 *
 * This function disables FBC regardless of which CRTC is associated with it.
 */
1119
void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
1120
{
1121 1122
	struct intel_fbc *fbc = &dev_priv->fbc;

1123 1124 1125
	if (!fbc_supported(dev_priv))
		return;

1126
	mutex_lock(&fbc->lock);
1127 1128
	if (fbc->enabled) {
		WARN_ON(fbc->crtc->active);
1129
		__intel_fbc_disable(dev_priv);
1130
	}
1131
	mutex_unlock(&fbc->lock);
1132 1133
}

1134 1135 1136 1137 1138 1139 1140 1141 1142
static void intel_fbc_underrun_work_fn(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, fbc.underrun_work);
	struct intel_fbc *fbc = &dev_priv->fbc;

	mutex_lock(&fbc->lock);

	/* Maybe we were scheduled twice. */
1143
	if (fbc->underrun_detected || !fbc->enabled)
1144 1145 1146 1147 1148
		goto out;

	DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
	fbc->underrun_detected = true;

1149
	intel_fbc_deactivate(dev_priv, "FIFO underrun");
1150 1151 1152 1153
out:
	mutex_unlock(&fbc->lock);
}

1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181
/*
 * intel_fbc_reset_underrun - reset FBC fifo underrun status.
 * @dev_priv: i915 device instance
 *
 * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
 * want to re-enable FBC after an underrun to increase test coverage.
 */
int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
{
	int ret;

	cancel_work_sync(&dev_priv->fbc.underrun_work);

	ret = mutex_lock_interruptible(&dev_priv->fbc.lock);
	if (ret)
		return ret;

	if (dev_priv->fbc.underrun_detected) {
		DRM_DEBUG_KMS("Re-allowing FBC after fifo underrun\n");
		dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared";
	}

	dev_priv->fbc.underrun_detected = false;
	mutex_unlock(&dev_priv->fbc.lock);

	return 0;
}

1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214
/**
 * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
 * @dev_priv: i915 device instance
 *
 * Without FBC, most underruns are harmless and don't really cause too many
 * problems, except for an annoying message on dmesg. With FBC, underruns can
 * become black screens or even worse, especially when paired with bad
 * watermarks. So in order for us to be on the safe side, completely disable FBC
 * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
 * already suggests that watermarks may be bad, so try to be as safe as
 * possible.
 *
 * This function is called from the IRQ handler.
 */
void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	/* There's no guarantee that underrun_detected won't be set to true
	 * right after this check and before the work is scheduled, but that's
	 * not a problem since we'll check it again under the work function
	 * while FBC is locked. This check here is just to prevent us from
	 * unnecessarily scheduling the work, and it relies on the fact that we
	 * never switch underrun_detect back to false after it's true. */
	if (READ_ONCE(fbc->underrun_detected))
		return;

	schedule_work(&fbc->underrun_work);
}

1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225
/*
 * The DDX driver changes its behavior depending on the value it reads from
 * i915.enable_fbc, so sanitize it by translating the default value into either
 * 0 or 1 in order to allow it to know what's going on.
 *
 * Notice that this is done at driver initialization and we still allow user
 * space to change the value during runtime without sanitizing it again. IGT
 * relies on being able to change i915.enable_fbc at runtime.
 */
static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
{
1226 1227
	if (i915_modparams.enable_fbc >= 0)
		return !!i915_modparams.enable_fbc;
1228

1229 1230 1231
	if (!HAS_FBC(dev_priv))
		return 0;

1232
	/* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */
1233
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
1234 1235
		return 0;

P
Paulo Zanoni 已提交
1236
	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
1237 1238 1239 1240 1241
		return 1;

	return 0;
}

1242 1243 1244
static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
{
	/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
1245
	if (intel_vtd_active() &&
1246 1247 1248 1249 1250 1251 1252 1253
	    (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
		DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
		return true;
	}

	return false;
}

R
Rodrigo Vivi 已提交
1254 1255 1256 1257 1258 1259
/**
 * intel_fbc_init - Initialize FBC
 * @dev_priv: the i915 device
 *
 * This function might be called during PM init process.
 */
1260 1261
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
1262
	struct intel_fbc *fbc = &dev_priv->fbc;
1263

1264
	INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
1265 1266 1267
	mutex_init(&fbc->lock);
	fbc->enabled = false;
	fbc->active = false;
P
Paulo Zanoni 已提交
1268

1269 1270 1271
	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		mkwrite_device_info(dev_priv)->display.has_fbc = false;

1272
	if (need_fbc_vtd_wa(dev_priv))
1273
		mkwrite_device_info(dev_priv)->display.has_fbc = false;
1274

1275 1276 1277
	i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
	DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
		      i915_modparams.enable_fbc);
1278

1279
	if (!HAS_FBC(dev_priv)) {
1280
		fbc->no_fbc_reason = "unsupported by this chipset";
1281 1282 1283
		return;
	}

1284
	/* This value was pulled out of someone's hat */
1285
	if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
1286 1287
		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);

1288
	/* We still don't have any sort of hardware state readout for FBC, so
1289 1290
	 * deactivate it in case the BIOS activated it to make sure software
	 * matches the hardware state. */
1291 1292
	if (intel_fbc_hw_is_active(dev_priv))
		intel_fbc_hw_deactivate(dev_priv);
1293
}