intel_fbc.c 40.7 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

R
Rodrigo Vivi 已提交
24 25 26 27 28 29
/**
 * DOC: Frame Buffer Compression (FBC)
 *
 * FBC tries to save memory bandwidth (and so power consumption) by
 * compressing the amount of memory used by the display. It is totally
 * transparent to user space and completely handled in the kernel.
30 31
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
R
Rodrigo Vivi 已提交
32 33
 * variation-less patterns. It comes from keeping the memory footprint small
 * and having fewer memory pages opened and accessed for refreshing the display.
34
 *
R
Rodrigo Vivi 已提交
35 36 37 38
 * i915 is responsible for reserving stolen memory for FBC and configuring its
 * offset on proper registers. The hardware takes care of all
 * compress/decompress. However there are many known cases where we have to
 * forcibly disable it to allow proper screen updates.
39 40
 */

41 42
#include <drm/drm_fourcc.h>

R
Rodrigo Vivi 已提交
43
#include "i915_drv.h"
44
#include "i915_trace.h"
45
#include "i915_vgpu.h"
46
#include "intel_display_types.h"
47
#include "intel_fbc.h"
48
#include "intel_frontbuffer.h"
R
Rodrigo Vivi 已提交
49

50 51 52 53 54 55 56 57
/*
 * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
 * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
 * origin so the x and y offsets can actually fit the registers. As a
 * consequence, the fence doesn't really start exactly at the display plane
 * address we program because it starts at the real start of the buffer, so we
 * have to take this into consideration here.
 */
58
static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
59
{
60
	return fbc->state_cache.plane.y - fbc->state_cache.plane.adjusted_y;
61 62
}

/*
 * For SKL+, the plane source size used by the hardware is based on the value we
 * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
 * we wrote to PIPESRC.
 */
static void intel_fbc_get_plane_source_size(const struct intel_fbc_state_cache *cache,
					    int *width, int *height)
{
	/* Either out-pointer may be NULL when the caller needs only one axis. */
	if (width)
		*width = cache->plane.src_w;
	if (height)
		*height = cache->plane.src_h;
}

/*
 * Compute the compressed framebuffer (CFB) size in bytes for the cached
 * plane state: number of (platform-capped) scanlines times the full stride.
 */
static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
					const struct intel_fbc_state_cache *cache)
{
	int lines;

	intel_fbc_get_plane_source_size(cache, NULL, &lines);
	/* Each generation caps how many lines the hardware can compress. */
	if (IS_GEN(dev_priv, 7))
		lines = min(lines, 2048);
	else if (INTEL_GEN(dev_priv) >= 8)
		lines = min(lines, 2560);

	/* Hardware needs the full buffer stride, not just the active area. */
	return lines * cache->fb.stride;
}

/*
 * Disable FBC on gen2/3-style hardware and wait (up to 10ms) for any
 * in-flight compression to drain before returning.
 */
static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
				    FBC_STAT_COMPRESSING, 10)) {
		drm_dbg_kms(&dev_priv->drm, "FBC idle timed out\n");
		return;
	}
}

/*
 * Program and enable FBC on gen2-4 hardware using the cached register
 * parameters (dev_priv->fbc.params). Order matters: tags are cleared and
 * FBC_CONTROL2/FBC_FENCE_OFF (gen4) are set up before FBC_CONTROL is enabled.
 */
static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	/* Note: fbc.threshold == 1 for i8xx */
	cfb_pitch = params->cfb_size / FBC_LL_SIZE;
	if (params->fb.stride < cfb_pitch)
		cfb_pitch = params->fb.stride;

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN(dev_priv, 2))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags so stale compression state can't linger. */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		intel_de_write(dev_priv, FBC_TAG(i), 0);

	if (IS_GEN(dev_priv, 4)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM;
		fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
		/* fence_id < 0 means no fence; skip CPU fence tracking then. */
		if (params->fence_id >= 0)
			fbc_ctl2 |= FBC_CTL_CPU_FENCE;
		intel_de_write(dev_priv, FBC_CONTROL2, fbc_ctl2);
		intel_de_write(dev_priv, FBC_FENCE_OFF,
			       params->crtc.fence_y_offset);
	}

	/* enable it... */
	fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev_priv))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	if (params->fence_id >= 0)
		fbc_ctl |= params->fence_id;
	intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl);
}

159
static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
160
{
161
	return intel_de_read(dev_priv, FBC_CONTROL) & FBC_CTL_EN;
162 163
}

/*
 * Program and enable FBC on G4X/GM45 hardware using the cached register
 * parameters. The compression limit is chosen from the framebuffer cpp.
 */
static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN;
	/* 16bpp needs a 2x limit; everything else uses 1x. */
	if (params->fb.format->cpp[0] == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;

	/* fence_id < 0 means no fence available; clear the Y offset then. */
	if (params->fence_id >= 0) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id;
		intel_de_write(dev_priv, DPFC_FENCE_YOFF,
			       params->crtc.fence_y_offset);
	} else {
		intel_de_write(dev_priv, DPFC_FENCE_YOFF, 0);
	}

	/* enable it... */
	intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
}

187
static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
188 189 190 191
{
	u32 dpfc_ctl;

	/* Disable compression */
192
	dpfc_ctl = intel_de_read(dev_priv, DPFC_CONTROL);
193 194
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
195
		intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl);
196 197 198
	}
}

199
static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
200
{
201
	return intel_de_read(dev_priv, DPFC_CONTROL) & DPFC_CTL_EN;
202 203
}

/* This function forces a CFB recompression through the nuke operation. */
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	trace_intel_fbc_nuke(fbc->crtc);

	intel_de_write(dev_priv, MSG_FBC_REND_STATE, FBC_REND_NUKE);
	/* Posting read ensures the nuke write has reached the hardware. */
	intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE);
}

/*
 * Program and enable FBC on ILK/SNB (gen5/6) hardware. Gen5 carries the
 * fence id in DPFC_CONTROL itself; gen6 uses the separate SNB_DPFC_CTL_SA
 * and DPFC_CPU_FENCE_OFFSET registers. Finishes with a forced recompression.
 */
static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane);
	/* 16bpp bumps the effective compression limit by one step. */
	if (params->fb.format->cpp[0] == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	if (params->fence_id >= 0) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN;
		if (IS_GEN(dev_priv, 5))
			dpfc_ctl |= params->fence_id;
		if (IS_GEN(dev_priv, 6)) {
			intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
				       SNB_CPU_FENCE_ENABLE | params->fence_id);
			intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
				       params->crtc.fence_y_offset);
		}
	} else {
		/* No fence: clear the gen6 CPU-fence tracking registers. */
		if (IS_GEN(dev_priv, 6)) {
			intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0);
			intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0);
		}
	}

	intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF,
		       params->crtc.fence_y_offset);
	/* enable it... */
	intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	intel_fbc_recompress(dev_priv);
}

263
static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
264 265 266 267
{
	u32 dpfc_ctl;

	/* Disable compression */
268
	dpfc_ctl = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
269 270
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
271
		intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl);
272 273 274
	}
}

275
static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
276
{
277
	return intel_de_read(dev_priv, ILK_DPFC_CONTROL) & DPFC_CTL_EN;
278 279
}

/*
 * Program and enable FBC on IVB+ (gen7 and later) hardware, applying the
 * per-platform workarounds (stride override, FBC queue disable, dummy
 * pixel count) before setting the enable bit, then force a recompression.
 */
static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	/* Display WA #0529: skl, kbl, bxt. */
	if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
		u32 val = intel_de_read(dev_priv, CHICKEN_MISC_4);

		val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);

		/* Zero stride means no override is needed for this config. */
		if (params->gen9_wa_cfb_stride)
			val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;

		intel_de_write(dev_priv, CHICKEN_MISC_4, val);
	}

	dpfc_ctl = 0;
	if (IS_IVYBRIDGE(dev_priv))
		dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane);

	/* 16bpp bumps the effective compression limit by one step. */
	if (params->fb.format->cpp[0] == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	if (params->fence_id >= 0) {
		dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
		intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
			       SNB_CPU_FENCE_ENABLE | params->fence_id);
		intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
			       params->crtc.fence_y_offset);
	} else if (dev_priv->ggtt.num_fences) {
		/* Fences exist but none is bound: clear the tracking regs. */
		intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0);
		intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0);
	}

	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	if (IS_IVYBRIDGE(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		intel_de_write(dev_priv, ILK_DISPLAY_CHICKEN1,
			       intel_de_read(dev_priv, ILK_DISPLAY_CHICKEN1) | ILK_FBCQ_DIS);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		intel_de_write(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe),
			       intel_de_read(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe)) | HSW_FBCQ_DIS);
	}

	if (INTEL_GEN(dev_priv) >= 11)
		/* Wa_1409120013:icl,ehl,tgl */
		intel_de_write(dev_priv, ILK_DPFC_CHICKEN,
			       ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);

	intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	intel_fbc_recompress(dev_priv);
}

352 353
static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
{
354
	if (INTEL_GEN(dev_priv) >= 5)
355 356 357 358 359 360 361 362 363
		return ilk_fbc_is_active(dev_priv);
	else if (IS_GM45(dev_priv))
		return g4x_fbc_is_active(dev_priv);
	else
		return i8xx_fbc_is_active(dev_priv);
}

/*
 * Mark FBC as (software) active and dispatch to the per-platform hardware
 * activation routine. Assumes fbc->params has already been filled in.
 */
static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	trace_intel_fbc_activate(fbc->crtc);

	/* active = currently on; activated = has been on since last reset. */
	fbc->active = true;
	fbc->activated = true;

	if (INTEL_GEN(dev_priv) >= 7)
		gen7_fbc_activate(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 5)
		ilk_fbc_activate(dev_priv);
	else if (IS_GM45(dev_priv))
		g4x_fbc_activate(dev_priv);
	else
		i8xx_fbc_activate(dev_priv);
}

/*
 * Mark FBC as (software) inactive and dispatch to the per-platform hardware
 * deactivation routine.
 */
static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	trace_intel_fbc_deactivate(fbc->crtc);

	fbc->active = false;

	if (INTEL_GEN(dev_priv) >= 5)
		ilk_fbc_deactivate(dev_priv);
	else if (IS_GM45(dev_priv))
		g4x_fbc_deactivate(dev_priv);
	else
		i8xx_fbc_deactivate(dev_priv);
}

/**
 * intel_fbc_is_active - Is FBC active?
 * @dev_priv: i915 device instance
 *
 * This function is used to verify the current state of FBC. It returns the
 * software-tracked state (fbc.active), not a live register read.
 *
 * FIXME: This should be tracked in the plane config eventually
 * instead of queried at runtime for most callers.
 */
bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return dev_priv->fbc.active;
}

/*
 * Deactivate FBC (if currently active) and record @reason as the
 * user-visible explanation (shown in debugfs). Caller must hold fbc->lock.
 */
static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
				 const char *reason)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));

	if (fbc->active)
		intel_fbc_hw_deactivate(dev_priv);

	fbc->no_fbc_reason = reason;
}

/*
 * Allocate a CFB node from stolen memory, halving the requested size (and
 * doubling the compression threshold) until the allocation fits or the 1:4
 * hardware limit is reached. Returns the threshold used, or 0 on failure.
 * Note: @size is modified in place by the <<= / >>= expressions below.
 */
static int find_compression_threshold(struct drm_i915_private *dev_priv,
				      struct drm_mm_node *node,
				      unsigned int size,
				      unsigned int fb_cpp)
{
	int compression_threshold = 1;
	int ret;
	u64 end;

	/* The FBC hardware for BDW/SKL doesn't have access to the stolen
	 * reserved range size, so it always assumes the maximum (8mb) is used.
	 * If we enable FBC using a CFB on that memory range we'll get FIFO
	 * underruns, even if that range is not reserved by the BIOS. */
	if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
		end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024;
	else
		end = U64_MAX;

	/* HACK: This code depends on what we will do in *_enable_fbc. If that
	 * code changes, this code needs to change as well.
	 *
	 * The enable_fbc code will attempt to use one of our 2 compression
	 * thresholds, therefore, in that case, we only have 1 resort.
	 */

	/* Try to over-allocate to reduce reallocations and fragmentation. */
	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
						   4096, 0, end);
	if (ret == 0)
		return compression_threshold;

again:
	/* HW's ability to limit the CFB is 1:4 */
	if (compression_threshold > 4 ||
	    (fb_cpp == 2 && compression_threshold == 2))
		return 0;

	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
						   4096, 0, end);
	if (ret && INTEL_GEN(dev_priv) <= 4) {
		/* Pre-gen5 can't raise the threshold; give up immediately. */
		return 0;
	} else if (ret) {
		compression_threshold <<= 1;
		goto again;
	} else {
		return compression_threshold;
	}
}

473 474
static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
			       unsigned int size, unsigned int fb_cpp)
475
{
476
	struct intel_fbc *fbc = &dev_priv->fbc;
477
	struct drm_mm_node *uninitialized_var(compressed_llb);
478
	int ret;
479

480 481
	drm_WARN_ON(&dev_priv->drm,
		    drm_mm_node_allocated(&fbc->compressed_fb));
482

483
	ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
484 485 486 487
					 size, fb_cpp);
	if (!ret)
		goto err_llb;
	else if (ret > 1) {
488 489
		drm_info_once(&dev_priv->drm,
			      "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
490 491
	}

492
	fbc->threshold = ret;
493

494
	if (INTEL_GEN(dev_priv) >= 5)
495 496
		intel_de_write(dev_priv, ILK_DPFC_CB_BASE,
			       fbc->compressed_fb.start);
497
	else if (IS_GM45(dev_priv)) {
498 499
		intel_de_write(dev_priv, DPFC_CB_BASE,
			       fbc->compressed_fb.start);
500 501 502 503 504 505 506 507 508 509
	} else {
		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
		if (!compressed_llb)
			goto err_fb;

		ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
						  4096, 4096);
		if (ret)
			goto err_fb;

510
		fbc->compressed_llb = compressed_llb;
511

512 513 514 515 516 517
		GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start,
						 fbc->compressed_fb.start,
						 U32_MAX));
		GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start,
						 fbc->compressed_llb->start,
						 U32_MAX));
518 519 520 521
		intel_de_write(dev_priv, FBC_CFB_BASE,
			       dev_priv->dsm.start + fbc->compressed_fb.start);
		intel_de_write(dev_priv, FBC_LL_BASE,
			       dev_priv->dsm.start + compressed_llb->start);
522 523
	}

524 525 526
	drm_dbg_kms(&dev_priv->drm,
		    "reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
		    fbc->compressed_fb.size, fbc->threshold);
527 528 529 530 531

	return 0;

err_fb:
	kfree(compressed_llb);
532
	i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
533
err_llb:
534
	if (drm_mm_initialized(&dev_priv->mm.stolen))
535
		drm_info_once(&dev_priv->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
536 537 538
	return -ENOSPC;
}

539
static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
540
{
541 542
	struct intel_fbc *fbc = &dev_priv->fbc;

543 544 545
	if (WARN_ON(intel_fbc_hw_is_active(dev_priv)))
		return;

546 547
	if (!drm_mm_node_allocated(&fbc->compressed_fb))
		return;
548 549 550 551

	if (fbc->compressed_llb) {
		i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
		kfree(fbc->compressed_llb);
552
	}
553 554

	i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
555 556
}

/*
 * Public wrapper for __intel_fbc_cleanup_cfb() that takes fbc->lock and
 * no-ops on hardware without FBC.
 */
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!HAS_FBC(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	__intel_fbc_cleanup_cfb(dev_priv);
	mutex_unlock(&fbc->lock);
}

/*
 * Check the framebuffer stride against the per-generation FBC restrictions.
 * Returns true when the stride is usable for compression.
 */
static bool stride_is_valid(struct drm_i915_private *dev_priv,
			    u64 modifier, unsigned int stride)
{
	/* This should have been caught earlier. */
	if (drm_WARN_ON_ONCE(&dev_priv->drm, (stride & (64 - 1)) != 0))
		return false;

	/* Below are the additional FBC restrictions. */
	if (stride < 512)
		return false;

	if (IS_GEN(dev_priv, 2) || IS_GEN(dev_priv, 3))
		return stride == 4096 || stride == 8192;

	if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048)
		return false;

	/* Display WA #1105: skl,bxt,kbl,cfl,glk */
	if (IS_GEN(dev_priv, 9) &&
	    modifier == DRM_FORMAT_MOD_LINEAR && stride & 511)
		return false;

	if (stride > 16384)
		return false;

	return true;
}

/*
 * Check whether the plane's pixel format is one FBC can compress on this
 * platform. Only 32bpp XRGB/XBGR and (with exceptions) 16bpp are supported.
 */
static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
				  u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
		return true;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_RGB565:
		/* 16bpp not supported on gen2 */
		if (IS_GEN(dev_priv, 2))
			return false;
		/* WaFbcOnly1to1Ratio:ctg */
		if (IS_G4X(dev_priv))
			return false;
		return true;
	default:
		return false;
	}
}

618 619 620 621 622 623 624 625 626 627 628 629 630
static bool rotation_is_valid(struct drm_i915_private *dev_priv,
			      u32 pixel_format, unsigned int rotation)
{
	if (INTEL_GEN(dev_priv) >= 9 && pixel_format == DRM_FORMAT_RGB565 &&
	    drm_rotation_90_or_270(rotation))
		return false;
	else if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
		 rotation != DRM_MODE_ROTATE_0)
		return false;

	return true;
}

/*
 * For some reason, the hardware tracking starts looking at whatever we
 * programmed as the display plane base address register. It does not look at
 * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
 * variables instead of just looking at the pipe/plane size.
 */
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	/*
	 * NOTE(review): these are unsigned while intel_fbc_get_plane_source_size()
	 * takes int pointers - looks like a signedness mismatch; confirm the
	 * build doesn't warn / consider aligning the types.
	 */
	unsigned int effective_w, effective_h, max_w, max_h;

	/* Per-generation maximum plane size the HW tracker can cover. */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		max_w = 5120;
		max_h = 4096;
	} else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
		max_w = 4096;
		max_h = 4096;
	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		max_w = 4096;
		max_h = 2048;
	} else {
		max_w = 2048;
		max_h = 1536;
	}

	intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
					&effective_h);
	effective_w += fbc->state_cache.plane.adjusted_x;
	effective_h += fbc->state_cache.plane.adjusted_y;

	return effective_w <= max_w && effective_h <= max_h;
}

665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680
static bool tiling_is_valid(struct drm_i915_private *dev_priv,
			    uint64_t modifier)
{
	switch (modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		if (INTEL_GEN(dev_priv) >= 9)
			return true;
		return false;
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
		return true;
	default:
		return false;
	}
}

/*
 * Snapshot everything FBC cares about from the new crtc/plane state into
 * fbc->state_cache, so later activation decisions don't need the atomic
 * state. If the plane is invisible nothing else in the cache is updated.
 */
static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
					 const struct intel_crtc_state *crtc_state,
					 const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;
	struct drm_framebuffer *fb = plane_state->hw.fb;

	cache->plane.visible = plane_state->uapi.visible;
	if (!cache->plane.visible)
		return;

	cache->crtc.mode_flags = crtc_state->hw.adjusted_mode.flags;
	/* Pixel rate only matters for the HSW/BDW cdclk workaround check. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;

	cache->plane.rotation = plane_state->hw.rotation;
	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	cache->plane.src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
	cache->plane.adjusted_x = plane_state->color_plane[0].x;
	cache->plane.adjusted_y = plane_state->color_plane[0].y;
	cache->plane.y = plane_state->uapi.src.y1 >> 16;

	cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;

	cache->fb.format = fb->format;
	cache->fb.stride = fb->pitches[0];
	cache->fb.modifier = fb->modifier;

	drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE &&
		    !plane_state->vma->fence);

	/* -1 means no fence; checked by intel_fbc_can_activate(). */
	if (plane_state->flags & PLANE_HAS_FENCE &&
	    plane_state->vma->fence)
		cache->fence_id = plane_state->vma->fence->id;
	else
		cache->fence_id = -1;
}

726 727 728 729 730 731 732 733
static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	return intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
		fbc->compressed_fb.size * fbc->threshold;
}

/*
 * Global (non-CRTC-specific) FBC gating checks. Records the first failing
 * reason in fbc->no_fbc_reason for debugfs.
 */
static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (intel_vgpu_active(dev_priv)) {
		fbc->no_fbc_reason = "VGPU is active";
		return false;
	}

	if (!i915_modparams.enable_fbc) {
		fbc->no_fbc_reason = "disabled per module param or by default";
		return false;
	}

	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "underrun detected";
		return false;
	}

	return true;
}

/*
 * Full per-CRTC activation check against the cached state. Each rejected
 * condition records a human-readable reason in fbc->no_fbc_reason; returns
 * true only when every constraint is satisfied.
 */
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	if (!intel_fbc_can_enable(dev_priv))
		return false;

	if (!cache->plane.visible) {
		fbc->no_fbc_reason = "primary plane not visible";
		return false;
	}

	/* We don't need to use a state cache here since this information is
	 * global for all CRTC.
	 */
	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "underrun detected";
		return false;
	}

	if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) {
		fbc->no_fbc_reason = "incompatible mode";
		return false;
	}

	if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
		fbc->no_fbc_reason = "mode too large for compression";
		return false;
	}

	/* The use of a CPU fence is one of two ways to detect writes by the
	 * CPU to the scanout and trigger updates to the FBC.
	 *
	 * The other method is by software tracking (see
	 * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke
	 * the current compressed buffer and recompress it.
	 *
	 * Note that is possible for a tiled surface to be unmappable (and
	 * so have no fence associated with it) due to aperture constraints
	 * at the time of pinning.
	 *
	 * FIXME with 90/270 degree rotation we should use the fence on
	 * the normal GTT view (the rotated view doesn't even have a
	 * fence). Would need changes to the FBC fence Y offset as well.
	 * For now this will effectively disable FBC with 90/270 degree
	 * rotation.
	 */
	if (INTEL_GEN(dev_priv) < 9 && cache->fence_id < 0) {
		fbc->no_fbc_reason = "framebuffer not tiled or fenced";
		return false;
	}

	if (!rotation_is_valid(dev_priv, cache->fb.format->format,
			       cache->plane.rotation)) {
		fbc->no_fbc_reason = "rotation unsupported";
		return false;
	}

	if (!tiling_is_valid(dev_priv, cache->fb.modifier)) {
		fbc->no_fbc_reason = "tiling unsupported";
		return false;
	}

	if (!stride_is_valid(dev_priv, cache->fb.modifier, cache->fb.stride)) {
		fbc->no_fbc_reason = "framebuffer stride not supported";
		return false;
	}

	if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
		fbc->no_fbc_reason = "pixel format is invalid";
		return false;
	}

	if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
	    cache->fb.format->has_alpha) {
		fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
		return false;
	}

	/* WaFbcExceedCdClockThreshold:hsw,bdw */
	if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
	    cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
		fbc->no_fbc_reason = "pixel rate is too big";
		return false;
	}

	/* It is possible for the required CFB size change without a
	 * crtc->disable + crtc->enable since it is possible to change the
	 * stride without triggering a full modeset. Since we try to
	 * over-allocate the CFB, there's a chance we may keep FBC enabled even
	 * if this happens, but if we exceed the current CFB size we'll have to
	 * disable FBC. Notice that it would be possible to disable FBC, wait
	 * for a frame, free the stolen node, then try to reenable FBC in case
	 * we didn't get any invalidate/deactivate calls, but this would require
	 * a lot of tracking just for a specific case. If we conclude it's an
	 * important case, we can implement it later. */
	if (intel_fbc_cfb_size_changed(dev_priv)) {
		fbc->no_fbc_reason = "CFB requirements changed";
		return false;
	}

	/*
	 * Work around a problem on GEN9+ HW, where enabling FBC on a plane
	 * having a Y offset that isn't divisible by 4 causes FIFO underrun
	 * and screen flicker.
	 */
	if (INTEL_GEN(dev_priv) >= 9 &&
	    (fbc->state_cache.plane.adjusted_y & 3)) {
		fbc->no_fbc_reason = "plane Y offset is misaligned";
		return false;
	}

	return true;
}

/*
 * Translate the state cache into the register-parameter struct that the
 * per-platform activate routines consume.
 */
static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
				     struct intel_fbc_reg_params *params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	/* Since all our fields are integer types, use memset here so the
	 * comparison function can rely on memcmp because the padding will be
	 * zero. */
	memset(params, 0, sizeof(*params));

	params->fence_id = cache->fence_id;

	params->crtc.pipe = crtc->pipe;
	params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
	params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);

	params->fb.format = cache->fb.format;
	params->fb.stride = cache->fb.stride;

	params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);

	params->gen9_wa_cfb_stride = cache->gen9_wa_cfb_stride;

	params->plane_visible = cache->plane.visible;
}

/*
 * Decide whether a plane update can be handled with just a CFB nuke
 * (recompression) instead of a full FBC deactivate/reactivate cycle:
 * only when no modeset is needed and the FBC-relevant parameters match
 * between the currently-programmed params and the new state cache.
 */
static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_fbc *fbc = &dev_priv->fbc;
	const struct intel_fbc_state_cache *cache = &fbc->state_cache;
	const struct intel_fbc_reg_params *params = &fbc->params;

	if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
		return false;

	if (!params->plane_visible)
		return false;

	if (!intel_fbc_can_activate(crtc))
		return false;

	if (params->fb.format != cache->fb.format)
		return false;

	if (params->fb.stride != cache->fb.stride)
		return false;

	if (params->cfb_size != intel_fbc_calculate_cfb_size(dev_priv, cache))
		return false;

	if (params->gen9_wa_cfb_stride != cache->gen9_wa_cfb_stride)
		return false;

	return true;
}

/*
 * Called before a plane update on the FBC CRTC: refresh the state cache,
 * mark a flip pending, and deactivate FBC when the update cannot be handled
 * with a simple nuke. Returns true when the caller must wait for a vblank
 * before touching plane registers (Display WA #1198, glk+).
 */
bool intel_fbc_pre_update(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	const char *reason = "update pending";
	bool need_vblank_wait = false;

	if (!plane->has_fbc || !plane_state)
		return need_vblank_wait;

	mutex_lock(&fbc->lock);

	/* FBC only tracks one CRTC at a time; ignore updates on others. */
	if (fbc->crtc != crtc)
		goto unlock;

	intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
	fbc->flip_pending = true;

	if (!intel_fbc_can_flip_nuke(crtc_state)) {
		intel_fbc_deactivate(dev_priv, reason);

		/*
		 * Display WA #1198: glk+
		 * Need an extra vblank wait between FBC disable and most plane
		 * updates. Bspec says this is only needed for plane disable, but
		 * that is not true. Touching most plane registers will cause the
		 * corruption to appear. Also SKL/derivatives do not seem to be
		 * affected.
		 *
		 * TODO: could optimize this a bit by sampling the frame
		 * counter when we disable FBC (if it was already done earlier)
		 * and skipping the extra vblank wait before the plane update
		 * if at least one frame has already passed.
		 */
		if (fbc->activated &&
		    (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)))
			need_vblank_wait = true;
		fbc->activated = false;
	}
unlock:
	mutex_unlock(&fbc->lock);

	return need_vblank_wait;
}

984 985 986 987 988 989 990 991 992 993 994 995
/**
 * __intel_fbc_disable - disable FBC
 * @dev_priv: i915 device instance
 *
 * This is the low level function that actually disables FBC. Callers should
 * grab the FBC lock.
 */
static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_crtc *crtc = fbc->crtc;

996 997 998
	drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));
	drm_WARN_ON(&dev_priv->drm, !fbc->crtc);
	drm_WARN_ON(&dev_priv->drm, fbc->active);
999

1000 1001
	drm_dbg_kms(&dev_priv->drm, "Disabling FBC on pipe %c\n",
		    pipe_name(crtc->pipe));
1002 1003 1004 1005 1006 1007

	__intel_fbc_cleanup_cfb(dev_priv);

	fbc->crtc = NULL;
}

1008
static void __intel_fbc_post_update(struct intel_crtc *crtc)
1009
{
1010
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1011 1012
	struct intel_fbc *fbc = &dev_priv->fbc;

1013
	drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));
1014

V
Ville Syrjälä 已提交
1015
	if (fbc->crtc != crtc)
1016 1017
		return;

1018 1019
	fbc->flip_pending = false;

1020 1021 1022 1023 1024 1025 1026
	if (!i915_modparams.enable_fbc) {
		intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
		__intel_fbc_disable(dev_priv);

		return;
	}

1027
	intel_fbc_get_reg_params(crtc, &fbc->params);
1028

1029
	if (!intel_fbc_can_activate(crtc))
1030 1031
		return;

1032
	if (!fbc->busy_bits)
1033
		intel_fbc_hw_activate(dev_priv);
1034
	else
1035
		intel_fbc_deactivate(dev_priv, "frontbuffer write");
P
Paulo Zanoni 已提交
1036 1037
}

1038 1039
void intel_fbc_post_update(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
P
Paulo Zanoni 已提交
1040
{
1041
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1042 1043 1044
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	const struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
1045
	struct intel_fbc *fbc = &dev_priv->fbc;
1046

1047
	if (!plane->has_fbc || !plane_state)
1048 1049
		return;

1050
	mutex_lock(&fbc->lock);
1051
	__intel_fbc_post_update(crtc);
1052
	mutex_unlock(&fbc->lock);
1053 1054
}

1055 1056
static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
{
V
Ville Syrjälä 已提交
1057
	if (fbc->crtc)
1058 1059 1060 1061 1062
		return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
	else
		return fbc->possible_framebuffer_bits;
}

1063 1064 1065 1066
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits,
			  enum fb_op_origin origin)
{
1067
	struct intel_fbc *fbc = &dev_priv->fbc;
1068

1069
	if (!HAS_FBC(dev_priv))
1070 1071
		return;

1072
	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
1073 1074
		return;

1075
	mutex_lock(&fbc->lock);
P
Paulo Zanoni 已提交
1076

1077
	fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;
1078

V
Ville Syrjälä 已提交
1079
	if (fbc->crtc && fbc->busy_bits)
1080
		intel_fbc_deactivate(dev_priv, "frontbuffer write");
P
Paulo Zanoni 已提交
1081

1082
	mutex_unlock(&fbc->lock);
1083 1084 1085
}

void intel_fbc_flush(struct drm_i915_private *dev_priv,
1086
		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
1087
{
1088 1089
	struct intel_fbc *fbc = &dev_priv->fbc;

1090
	if (!HAS_FBC(dev_priv))
1091 1092
		return;

1093
	mutex_lock(&fbc->lock);
1094

1095
	fbc->busy_bits &= ~frontbuffer_bits;
1096

1097 1098 1099
	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		goto out;

V
Ville Syrjälä 已提交
1100
	if (!fbc->busy_bits && fbc->crtc &&
1101
	    (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
1102
		if (fbc->active)
1103
			intel_fbc_recompress(dev_priv);
1104
		else if (!fbc->flip_pending)
1105
			__intel_fbc_post_update(fbc->crtc);
1106
	}
P
Paulo Zanoni 已提交
1107

1108
out:
1109
	mutex_unlock(&fbc->lock);
1110 1111
}

1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124
/**
 * intel_fbc_choose_crtc - select a CRTC to enable FBC on
 * @dev_priv: i915 device instance
 * @state: the atomic state structure
 *
 * This function looks at the proposed state for CRTCs and planes, then chooses
 * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
 * true.
 *
 * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe
 * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
 */
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
1125
			   struct intel_atomic_state *state)
1126 1127
{
	struct intel_fbc *fbc = &dev_priv->fbc;
1128 1129
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
1130
	bool crtc_chosen = false;
1131
	int i;
1132 1133 1134

	mutex_lock(&fbc->lock);

1135 1136
	/* Does this atomic commit involve the CRTC currently tied to FBC? */
	if (fbc->crtc &&
1137
	    !intel_atomic_get_new_crtc_state(state, fbc->crtc))
1138 1139
		goto out;

1140 1141 1142
	if (!intel_fbc_can_enable(dev_priv))
		goto out;

1143 1144 1145 1146
	/* Simply choose the first CRTC that is compatible and has a visible
	 * plane. We could go for fancier schemes such as checking the plane
	 * size, but this would just affect the few platforms that don't tie FBC
	 * to pipe or plane A. */
1147 1148
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_crtc_state *crtc_state;
1149
		struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
1150

1151
		if (!plane->has_fbc)
1152 1153
			continue;

1154
		if (!plane_state->uapi.visible)
1155 1156
			continue;

1157
		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
1158

1159
		crtc_state->enable_fbc = true;
1160
		crtc_chosen = true;
1161
		break;
1162 1163
	}

1164 1165 1166
	if (!crtc_chosen)
		fbc->no_fbc_reason = "no suitable CRTC for FBC";

1167 1168 1169 1170
out:
	mutex_unlock(&fbc->lock);
}

1171 1172 1173
/**
 * intel_fbc_enable: tries to enable FBC on the CRTC
 * @crtc: the CRTC
1174
 * @state: corresponding &drm_crtc_state for @crtc
1175
 *
1176
 * This function checks if the given CRTC was chosen for FBC, then enables it if
1177 1178 1179
 * possible. Notice that it doesn't activate FBC. It is valid to call
 * intel_fbc_enable multiple times for the same pipe without an
 * intel_fbc_disable in the middle, as long as it is deactivated.
1180
 */
1181 1182
void intel_fbc_enable(struct intel_atomic_state *state,
		      struct intel_crtc *crtc)
1183
{
1184
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1185 1186 1187 1188 1189
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
1190
	struct intel_fbc *fbc = &dev_priv->fbc;
1191
	struct intel_fbc_state_cache *cache = &fbc->state_cache;
1192

1193
	if (!plane->has_fbc || !plane_state)
1194 1195
		return;

1196
	mutex_lock(&fbc->lock);
1197

V
Ville Syrjälä 已提交
1198
	if (fbc->crtc) {
1199 1200 1201
		if (fbc->crtc != crtc ||
		    !intel_fbc_cfb_size_changed(dev_priv))
			goto out;
1202

1203 1204
		__intel_fbc_disable(dev_priv);
	}
1205

1206
	drm_WARN_ON(&dev_priv->drm, fbc->active);
1207

1208
	intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
1209 1210 1211 1212 1213 1214 1215

	/* FIXME crtc_state->enable_fbc lies :( */
	if (!cache->plane.visible)
		goto out;

	if (intel_fbc_alloc_cfb(dev_priv,
				intel_fbc_calculate_cfb_size(dev_priv, cache),
1216
				plane_state->hw.fb->format->cpp[0])) {
1217
		cache->plane.visible = false;
1218
		fbc->no_fbc_reason = "not enough stolen memory";
1219 1220 1221
		goto out;
	}

1222
	if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) &&
1223
	    plane_state->hw.fb->modifier != I915_FORMAT_MOD_X_TILED)
1224 1225 1226 1227 1228
		cache->gen9_wa_cfb_stride =
			DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8;
	else
		cache->gen9_wa_cfb_stride = 0;

1229 1230
	drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n",
		    pipe_name(crtc->pipe));
1231
	fbc->no_fbc_reason = "FBC enabled but not active yet\n";
1232

1233
	fbc->crtc = crtc;
1234
out:
1235
	mutex_unlock(&fbc->lock);
1236 1237 1238
}

/**
1239
 * intel_fbc_disable - disable FBC if it's associated with crtc
1240 1241 1242 1243
 * @crtc: the CRTC
 *
 * This function disables FBC if it's associated with the provided CRTC.
 */
1244
void intel_fbc_disable(struct intel_crtc *crtc)
1245
{
1246
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1247
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
1248
	struct intel_fbc *fbc = &dev_priv->fbc;
1249

1250
	if (!plane->has_fbc)
1251 1252
		return;

1253
	mutex_lock(&fbc->lock);
1254
	if (fbc->crtc == crtc)
1255
		__intel_fbc_disable(dev_priv);
1256
	mutex_unlock(&fbc->lock);
1257 1258 1259
}

/**
1260
 * intel_fbc_global_disable - globally disable FBC
1261 1262 1263 1264
 * @dev_priv: i915 device instance
 *
 * This function disables FBC regardless of which CRTC is associated with it.
 */
1265
void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
1266
{
1267 1268
	struct intel_fbc *fbc = &dev_priv->fbc;

1269
	if (!HAS_FBC(dev_priv))
1270 1271
		return;

1272
	mutex_lock(&fbc->lock);
V
Ville Syrjälä 已提交
1273
	if (fbc->crtc) {
1274
		drm_WARN_ON(&dev_priv->drm, fbc->crtc->active);
1275
		__intel_fbc_disable(dev_priv);
1276
	}
1277
	mutex_unlock(&fbc->lock);
1278 1279
}

1280 1281 1282 1283 1284 1285 1286 1287 1288
static void intel_fbc_underrun_work_fn(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, fbc.underrun_work);
	struct intel_fbc *fbc = &dev_priv->fbc;

	mutex_lock(&fbc->lock);

	/* Maybe we were scheduled twice. */
V
Ville Syrjälä 已提交
1289
	if (fbc->underrun_detected || !fbc->crtc)
1290 1291
		goto out;

1292
	drm_dbg_kms(&dev_priv->drm, "Disabling FBC due to FIFO underrun.\n");
1293 1294
	fbc->underrun_detected = true;

1295
	intel_fbc_deactivate(dev_priv, "FIFO underrun");
1296 1297 1298 1299
out:
	mutex_unlock(&fbc->lock);
}

1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317
/*
 * intel_fbc_reset_underrun - reset FBC fifo underrun status.
 * @dev_priv: i915 device instance
 *
 * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
 * want to re-enable FBC after an underrun to increase test coverage.
 */
int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
{
	int ret;

	cancel_work_sync(&dev_priv->fbc.underrun_work);

	ret = mutex_lock_interruptible(&dev_priv->fbc.lock);
	if (ret)
		return ret;

	if (dev_priv->fbc.underrun_detected) {
1318 1319
		drm_dbg_kms(&dev_priv->drm,
			    "Re-allowing FBC after fifo underrun\n");
1320 1321 1322 1323 1324 1325 1326 1327 1328
		dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared";
	}

	dev_priv->fbc.underrun_detected = false;
	mutex_unlock(&dev_priv->fbc.lock);

	return 0;
}

1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346
/**
 * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
 * @dev_priv: i915 device instance
 *
 * Without FBC, most underruns are harmless and don't really cause too many
 * problems, except for an annoying message on dmesg. With FBC, underruns can
 * become black screens or even worse, especially when paired with bad
 * watermarks. So in order for us to be on the safe side, completely disable FBC
 * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
 * already suggests that watermarks may be bad, so try to be as safe as
 * possible.
 *
 * This function is called from the IRQ handler.
 */
void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

1347
	if (!HAS_FBC(dev_priv))
1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361
		return;

	/* There's no guarantee that underrun_detected won't be set to true
	 * right after this check and before the work is scheduled, but that's
	 * not a problem since we'll check it again under the work function
	 * while FBC is locked. This check here is just to prevent us from
	 * unnecessarily scheduling the work, and it relies on the fact that we
	 * never switch underrun_detect back to false after it's true. */
	if (READ_ONCE(fbc->underrun_detected))
		return;

	schedule_work(&fbc->underrun_work);
}

1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372
/*
 * The DDX driver changes its behavior depending on the value it reads from
 * i915.enable_fbc, so sanitize it by translating the default value into either
 * 0 or 1 in order to allow it to know what's going on.
 *
 * Notice that this is done at driver initialization and we still allow user
 * space to change the value during runtime without sanitizing it again. IGT
 * relies on being able to change i915.enable_fbc at runtime.
 */
static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
{
1373 1374
	if (i915_modparams.enable_fbc >= 0)
		return !!i915_modparams.enable_fbc;
1375

1376 1377 1378
	if (!HAS_FBC(dev_priv))
		return 0;

P
Paulo Zanoni 已提交
1379
	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
1380 1381 1382 1383 1384
		return 1;

	return 0;
}

1385 1386 1387
static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
{
	/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
1388
	if (intel_vtd_active() &&
1389
	    (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
1390 1391
		drm_info(&dev_priv->drm,
			 "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
1392 1393 1394 1395 1396 1397
		return true;
	}

	return false;
}

R
Rodrigo Vivi 已提交
1398 1399 1400 1401 1402 1403
/**
 * intel_fbc_init - Initialize FBC
 * @dev_priv: the i915 device
 *
 * This function might be called during PM init process.
 */
1404 1405
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
1406
	struct intel_fbc *fbc = &dev_priv->fbc;
1407

1408
	INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
1409 1410
	mutex_init(&fbc->lock);
	fbc->active = false;
P
Paulo Zanoni 已提交
1411

1412 1413 1414
	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		mkwrite_device_info(dev_priv)->display.has_fbc = false;

1415
	if (need_fbc_vtd_wa(dev_priv))
1416
		mkwrite_device_info(dev_priv)->display.has_fbc = false;
1417

1418
	i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
1419 1420
	drm_dbg_kms(&dev_priv->drm, "Sanitized enable_fbc value: %d\n",
		    i915_modparams.enable_fbc);
1421

1422
	if (!HAS_FBC(dev_priv)) {
1423
		fbc->no_fbc_reason = "unsupported by this chipset";
1424 1425 1426
		return;
	}

1427
	/* This value was pulled out of someone's hat */
1428
	if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
1429 1430
		intel_de_write(dev_priv, FBC_CONTROL,
		               500 << FBC_CTL_INTERVAL_SHIFT);
1431

1432
	/* We still don't have any sort of hardware state readout for FBC, so
1433 1434
	 * deactivate it in case the BIOS activated it to make sure software
	 * matches the hardware state. */
1435 1436
	if (intel_fbc_hw_is_active(dev_priv))
		intel_fbc_hw_deactivate(dev_priv);
1437
}