intel_fbc.c 48.4 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

R
Rodrigo Vivi 已提交
24 25 26 27 28 29
/**
 * DOC: Frame Buffer Compression (FBC)
 *
 * FBC tries to save memory bandwidth (and so power consumption) by
 * compressing the amount of memory used by the display. It is totally
 * transparent to user space and completely handled in the kernel.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns. It comes from keeping the memory footprint small
 * and having fewer memory pages opened and accessed for refreshing the display.
 *
 * i915 is responsible for reserving stolen memory for FBC and configuring its
 * offset on proper registers. The hardware takes care of all
 * compress/decompress. However there are many known cases where we have to
 * forcibly disable it to allow proper screen updates.
 */

41 42
#include <drm/drm_fourcc.h>

R
Rodrigo Vivi 已提交
43
#include "i915_drv.h"
44
#include "i915_vgpu.h"
45
#include "intel_cdclk.h"
46
#include "intel_de.h"
47
#include "intel_display_trace.h"
48
#include "intel_display_types.h"
49
#include "intel_fbc.h"
50
#include "intel_frontbuffer.h"
R
Rodrigo Vivi 已提交
51

52 53 54 55 56 57 58
/* Iterate over every possible FBC instance id. */
#define for_each_fbc_id(__fbc_id) \
	for ((__fbc_id) = INTEL_FBC_A; (__fbc_id) < I915_MAX_FBCS; (__fbc_id)++)

/* Iterate over the FBC instances actually allocated for this device. */
#define for_each_intel_fbc(__dev_priv, __fbc, __fbc_id) \
	for_each_fbc_id(__fbc_id) \
		for_each_if((__fbc) = (__dev_priv)->fbc[(__fbc_id)])
59
/* Per-platform hooks for programming the FBC hardware. */
struct intel_fbc_funcs {
	void (*activate)(struct intel_fbc *fbc);
	void (*deactivate)(struct intel_fbc *fbc);
	bool (*is_active)(struct intel_fbc *fbc);
	bool (*is_compressing)(struct intel_fbc *fbc);
	void (*nuke)(struct intel_fbc *fbc);
	void (*program_cfb)(struct intel_fbc *fbc);
	void (*set_false_color)(struct intel_fbc *fbc, bool enable);
};

69
/* Snapshot of the per-plane parameters the activate hooks program into hw. */
struct intel_fbc_state {
	struct intel_plane *plane;
	unsigned int cfb_stride;	/* CFB stride in bytes, 1:1 limit */
	unsigned int cfb_size;		/* CFB size in bytes, 1:1 limit */
	unsigned int fence_y_offset;
	u16 override_cfb_stride;	/* in 64 byte units, 0 = no override */
	u16 interval;			/* FBC1 compression interval */
	s8 fence_id;			/* fence to use, -1 = none */
};

struct intel_fbc {
	struct drm_i915_private *i915;
	const struct intel_fbc_funcs *funcs;

	/*
	 * This is always the inner lock when overlapping with
	 * struct_mutex and it's the outer lock when overlapping
	 * with stolen_lock.
	 */
	struct mutex lock;
	unsigned int possible_framebuffer_bits;
	unsigned int busy_bits;

	struct drm_mm_node compressed_fb;	/* CFB allocation in stolen */
	struct drm_mm_node compressed_llb;	/* written to FBC_LL_BASE on old platforms */

	enum intel_fbc_id id;

	u8 limit;	/* chosen compression limit (1, 2 or 4) */

	bool false_color;

	bool active;
	bool activated;
	bool flip_pending;

	bool underrun_detected;
	struct work_struct underrun_work;

	/*
	 * This structure contains everything that's relevant to program the
	 * hardware registers. When we want to figure out if we need to disable
	 * and re-enable FBC for a new configuration we just check if there's
	 * something different in the struct. The genx_fbc_activate functions
	 * are supposed to read from it in order to program the registers.
	 */
	struct intel_fbc_state state;

	const char *no_fbc_reason;
};

119 120
/* plane stride in pixels */
static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int stride;

	stride = plane_state->view.color_plane[0].mapping_stride;
	/* mapping_stride is in bytes for the non-rotated view; convert to pixels */
	if (!drm_rotation_90_or_270(plane_state->hw.rotation))
		stride /= fb->format->cpp[0];

	return stride;
}

/* plane stride based cfb stride in bytes, assuming 1:1 compression limit */
static unsigned int _intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
{
	unsigned int cpp = 4; /* FBC always 4 bytes per pixel */

	return intel_fbc_plane_stride(plane_state) * cpp;
}

/* minimum acceptable cfb stride in bytes, assuming 1:1 compression limit */
static unsigned int skl_fbc_min_cfb_stride(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	unsigned int limit = 4; /* 1:4 compression limit is the worst case */
	unsigned int cpp = 4; /* FBC always 4 bytes per pixel */
	unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16;
	unsigned int height = 4; /* FBC segment is 4 lines */
	unsigned int stride;

	/* minimum segment stride we can use */
	stride = width * cpp * height / limit;

	/*
	 * Wa_16011863758: icl+
	 * Avoid some hardware segment address miscalculation.
	 */
	if (DISPLAY_VER(i915) >= 11)
		stride += 64;

	/*
	 * At least some of the platforms require each 4 line segment to
	 * be 512 byte aligned. Just do it always for simplicity.
	 */
	stride = ALIGN(stride, 512);

	/* convert back to single line equivalent with 1:1 compression limit */
	return stride * limit / height;
}

/* properly aligned cfb stride in bytes, assuming 1:1 compression limit */
static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	unsigned int stride = _intel_fbc_cfb_stride(plane_state);

	/*
	 * At least some of the platforms require each 4 line segment to
	 * be 512 byte aligned. Aligning each line to 512 bytes guarantees
	 * that regardless of the compression limit we choose later.
	 */
	if (DISPLAY_VER(i915) >= 9)
		return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(plane_state));
	else
		return stride;
}

187
/* total cfb size in bytes, assuming 1:1 compression limit */
static unsigned int intel_fbc_cfb_size(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	int lines = drm_rect_height(&plane_state->uapi.src) >> 16;

	/* per-platform cap on the number of lines the hw will compress */
	if (DISPLAY_VER(i915) == 7)
		lines = min(lines, 2048);
	else if (DISPLAY_VER(i915) >= 8)
		lines = min(lines, 2560);

	return lines * intel_fbc_cfb_stride(plane_state);
}

200
/* cfb stride override value, 0 = override not needed */
static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	unsigned int stride_aligned = intel_fbc_cfb_stride(plane_state);
	unsigned int stride = _intel_fbc_cfb_stride(plane_state);
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	/*
	 * Override stride in 64 byte units per 4 line segment.
	 *
	 * Gen9 hw miscalculates cfb stride for linear as
	 * PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so
	 * we always need to use the override there.
	 */
	if (stride != stride_aligned ||
	    (DISPLAY_VER(i915) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR))
		return stride_aligned * 4 / 64;

	return 0;
}

221
/* Build the FBC_CONTROL register value from the cached FBC state. */
static u32 i8xx_fbc_ctl(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	unsigned int cfb_stride;
	u32 fbc_ctl;

	/* cfb_stride assumes 1:1 compression; scale by the chosen limit */
	cfb_stride = fbc_state->cfb_stride / fbc->limit;

	/* FBC_CTL wants 32B or 64B units */
	if (DISPLAY_VER(i915) == 2)
		cfb_stride = (cfb_stride / 32) - 1;
	else
		cfb_stride = (cfb_stride / 64) - 1;

	fbc_ctl = FBC_CTL_PERIODIC |
		FBC_CTL_INTERVAL(fbc_state->interval) |
		FBC_CTL_STRIDE(cfb_stride);

	if (IS_I945GM(i915))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */

	if (fbc_state->fence_id >= 0)
		fbc_ctl |= FBC_CTL_FENCENO(fbc_state->fence_id);

	return fbc_ctl;
}

249
/* Build the FBC_CONTROL2 register value (gen4) from the cached FBC state. */
static u32 i965_fbc_ctl2(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	u32 fbc_ctl2;

	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM |
		FBC_CTL_PLANE(fbc_state->plane->i9xx_plane);

	if (fbc_state->fence_id >= 0)
		fbc_ctl2 |= FBC_CTL_CPU_FENCE_EN;

	return fbc_ctl2;
}

263
/* Disable i8xx FBC and wait for any in-progress compression to stop. */
static void i8xx_fbc_deactivate(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = intel_de_read(i915, FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	intel_de_write(i915, FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (intel_de_wait_for_clear(i915, FBC_STATUS,
				    FBC_STAT_COMPRESSING, 10)) {
		drm_dbg_kms(&i915->drm, "FBC idle timed out\n");
		return;
	}
}

284
/* Program and enable i8xx/i965 FBC from the cached FBC state. */
static void i8xx_fbc_activate(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	int i;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		intel_de_write(i915, FBC_TAG(i), 0);

	/* gen4 has the extra CONTROL2/FENCE_OFF registers */
	if (DISPLAY_VER(i915) == 4) {
		intel_de_write(i915, FBC_CONTROL2,
			       i965_fbc_ctl2(fbc));
		intel_de_write(i915, FBC_FENCE_OFF,
			       fbc_state->fence_y_offset);
	}

	intel_de_write(i915, FBC_CONTROL,
		       FBC_CTL_EN | i8xx_fbc_ctl(fbc));
}

305
static bool i8xx_fbc_is_active(struct intel_fbc *fbc)
306
{
307
	return intel_de_read(fbc->i915, FBC_CONTROL) & FBC_CTL_EN;
308 309
}

310
static bool i8xx_fbc_is_compressing(struct intel_fbc *fbc)
311
{
312
	return intel_de_read(fbc->i915, FBC_STATUS) &
313 314 315
		(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
}

316
/*
 * Rewrite the plane address register with its current value to
 * retrigger hardware frontbuffer tracking for this plane.
 */
static void i8xx_fbc_nuke(struct intel_fbc *fbc)
{
	struct intel_fbc_state *fbc_state = &fbc->state;
	enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
	struct drm_i915_private *dev_priv = fbc->i915;

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
			  intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane)));
	spin_unlock_irq(&dev_priv->uncore.lock);
}

328
/* Program the CFB and line-length-buffer base addresses (stolen offsets). */
static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	/* the registers are 32 bit; the stolen offsets must fit */
	GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start,
					 fbc->compressed_fb.start, U32_MAX));
	GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start,
					 fbc->compressed_llb.start, U32_MAX));

	intel_de_write(i915, FBC_CFB_BASE,
		       i915->dsm.start + fbc->compressed_fb.start);
	intel_de_write(i915, FBC_LL_BASE,
		       i915->dsm.start + fbc->compressed_llb.start);
}

343 344 345 346 347
/* FBC hooks for gen2/3 hardware. */
static const struct intel_fbc_funcs i8xx_fbc_funcs = {
	.activate = i8xx_fbc_activate,
	.deactivate = i8xx_fbc_deactivate,
	.is_active = i8xx_fbc_is_active,
	.is_compressing = i8xx_fbc_is_compressing,
	.nuke = i8xx_fbc_nuke,
	.program_cfb = i8xx_fbc_program_cfb,
};

352
/*
 * Like i8xx_fbc_nuke(), but retriggers frontbuffer tracking via the
 * DSPSURF register used on gen4+ planes.
 */
static void i965_fbc_nuke(struct intel_fbc *fbc)
{
	struct intel_fbc_state *fbc_state = &fbc->state;
	enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
	struct drm_i915_private *dev_priv = fbc->i915;

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
			  intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane)));
	spin_unlock_irq(&dev_priv->uncore.lock);
}

/* FBC hooks for gen4: i8xx programming with the DSPSURF-based nuke. */
static const struct intel_fbc_funcs i965_fbc_funcs = {
	.activate = i8xx_fbc_activate,
	.deactivate = i8xx_fbc_deactivate,
	.is_active = i8xx_fbc_is_active,
	.is_compressing = i8xx_fbc_is_compressing,
	.nuke = i965_fbc_nuke,
	.program_cfb = i8xx_fbc_program_cfb,
};

373
/* Translate the numeric compression limit into DPFC_CTL limit bits. */
static u32 g4x_dpfc_ctl_limit(struct intel_fbc *fbc)
{
	switch (fbc->limit) {
	default:
		MISSING_CASE(fbc->limit);
		fallthrough;
	case 1:
		return DPFC_CTL_LIMIT_1X;
	case 2:
		return DPFC_CTL_LIMIT_2X;
	case 4:
		return DPFC_CTL_LIMIT_4X;
	}
}

388
/* Build the DPFC_CONTROL register value from the cached FBC state. */
static u32 g4x_dpfc_ctl(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	u32 dpfc_ctl;

	dpfc_ctl = g4x_dpfc_ctl_limit(fbc) |
		DPFC_CTL_PLANE_G4X(fbc_state->plane->i9xx_plane);

	if (IS_G4X(i915))
		dpfc_ctl |= DPFC_CTL_SR_EN;

	if (fbc_state->fence_id >= 0) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN_G4X;

		/* pre-snb also carries the fence number in this register */
		if (DISPLAY_VER(i915) < 6)
			dpfc_ctl |= DPFC_CTL_FENCENO(fbc_state->fence_id);
	}

	return dpfc_ctl;
}

410
/* Program and enable g4x FBC from the cached FBC state. */
static void g4x_fbc_activate(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;

	intel_de_write(i915, DPFC_FENCE_YOFF,
		       fbc_state->fence_y_offset);

	intel_de_write(i915, DPFC_CONTROL,
		       DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
}

422
/* Disable g4x FBC if it is currently enabled. */
static void g4x_fbc_deactivate(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = intel_de_read(i915, DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		intel_de_write(i915, DPFC_CONTROL, dpfc_ctl);
	}
}

435
/* Report whether g4x FBC is currently enabled in hardware. */
static bool g4x_fbc_is_active(struct intel_fbc *fbc)
{
	return intel_de_read(fbc->i915, DPFC_CONTROL) & DPFC_CTL_EN;
}

/* Report whether the g4x FBC hardware has compressed segments. */
static bool g4x_fbc_is_compressing(struct intel_fbc *fbc)
{
	return intel_de_read(fbc->i915, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
}

/* Program the CFB base address (offset into stolen memory). */
static void g4x_fbc_program_cfb(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	intel_de_write(i915, DPFC_CB_BASE, fbc->compressed_fb.start);
}

/* FBC hooks for g4x hardware. */
static const struct intel_fbc_funcs g4x_fbc_funcs = {
	.activate = g4x_fbc_activate,
	.deactivate = g4x_fbc_deactivate,
	.is_active = g4x_fbc_is_active,
	.is_compressing = g4x_fbc_is_compressing,
	.nuke = i965_fbc_nuke,
	.program_cfb = g4x_fbc_program_cfb,
};

461
/* Program and enable ilk+ FBC from the cached FBC state. */
static void ilk_fbc_activate(struct intel_fbc *fbc)
{
	struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;

	intel_de_write(i915, ILK_DPFC_FENCE_YOFF(fbc->id),
		       fbc_state->fence_y_offset);

	intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
		       DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
}

473
/* Disable ilk+ FBC if it is currently enabled. */
static void ilk_fbc_deactivate(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = intel_de_read(i915, ILK_DPFC_CONTROL(fbc->id));
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
	}
}

486
/* Report whether ilk+ FBC is currently enabled in hardware. */
static bool ilk_fbc_is_active(struct intel_fbc *fbc)
{
	return intel_de_read(fbc->i915, ILK_DPFC_CONTROL(fbc->id)) & DPFC_CTL_EN;
}

/* Report whether the ilk+ FBC hardware has compressed segments. */
static bool ilk_fbc_is_compressing(struct intel_fbc *fbc)
{
	return intel_de_read(fbc->i915, ILK_DPFC_STATUS(fbc->id)) & DPFC_COMP_SEG_MASK;
}

/* Program the CFB base address (offset into stolen memory). */
static void ilk_fbc_program_cfb(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	intel_de_write(i915, ILK_DPFC_CB_BASE(fbc->id), fbc->compressed_fb.start);
}

/* FBC hooks for ilk hardware. */
static const struct intel_fbc_funcs ilk_fbc_funcs = {
	.activate = ilk_fbc_activate,
	.deactivate = ilk_fbc_deactivate,
	.is_active = ilk_fbc_is_active,
	.is_compressing = ilk_fbc_is_compressing,
	.nuke = i965_fbc_nuke,
	.program_cfb = ilk_fbc_program_cfb,
};

512
/* Program (or clear) the CPU fence used for FBC on snb+. */
static void snb_fbc_program_fence(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	u32 ctl = 0;

	if (fbc_state->fence_id >= 0)
		ctl = SNB_DPFC_FENCE_EN | SNB_DPFC_FENCENO(fbc_state->fence_id);

	intel_de_write(i915, SNB_DPFC_CTL_SA, ctl);
	intel_de_write(i915, SNB_DPFC_CPU_FENCE_OFFSET, fbc_state->fence_y_offset);
}

525
/* snb activation: program the CPU fence, then the ilk register sequence. */
static void snb_fbc_activate(struct intel_fbc *fbc)
{
	snb_fbc_program_fence(fbc);

	ilk_fbc_activate(fbc);
}

/* Trigger a hardware re-compression via the render nuke message register. */
static void snb_fbc_nuke(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	intel_de_write(i915, MSG_FBC_REND_STATE(fbc->id), FBC_REND_NUKE);
	intel_de_posting_read(i915, MSG_FBC_REND_STATE(fbc->id));
}

/* FBC hooks for snb hardware. */
static const struct intel_fbc_funcs snb_fbc_funcs = {
	.activate = snb_fbc_activate,
	.deactivate = ilk_fbc_deactivate,
	.is_active = ilk_fbc_is_active,
	.is_compressing = ilk_fbc_is_compressing,
	.nuke = snb_fbc_nuke,
	.program_cfb = ilk_fbc_program_cfb,
};

549
/* Program the cfb stride override (glk+ dedicated register). */
static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	u32 val = 0;

	if (fbc_state->override_cfb_stride)
		val |= FBC_STRIDE_OVERRIDE |
			FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);

	intel_de_write(i915, GLK_FBC_STRIDE(fbc->id), val);
}
561

562
/* Program the cfb stride override via the skl chicken register. */
static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	u32 val = 0;

	/* Display WA #0529: skl, kbl, bxt. */
	if (fbc_state->override_cfb_stride)
		val |= CHICKEN_FBC_STRIDE_OVERRIDE |
			CHICKEN_FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);

	intel_de_rmw(i915, CHICKEN_MISC_4,
		     CHICKEN_FBC_STRIDE_OVERRIDE |
		     CHICKEN_FBC_STRIDE_MASK, val);
}

578
/* Build the ivb+ DPFC control register value from the cached FBC state. */
static u32 ivb_dpfc_ctl(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	u32 dpfc_ctl;

	dpfc_ctl = g4x_dpfc_ctl_limit(fbc);

	if (IS_IVYBRIDGE(i915))
		dpfc_ctl |= DPFC_CTL_PLANE_IVB(fbc_state->plane->i9xx_plane);

	if (fbc_state->fence_id >= 0)
		dpfc_ctl |= DPFC_CTL_FENCE_EN_IVB;

	/* debug mode: visualize compressed vs. uncompressed segments */
	if (fbc->false_color)
		dpfc_ctl |= DPFC_CTL_FALSE_COLOR;

	return dpfc_ctl;
}

598
/* Program and enable ivb+ FBC: stride override, fence, then control. */
static void ivb_fbc_activate(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	if (DISPLAY_VER(i915) >= 10)
		glk_fbc_program_cfb_stride(fbc);
	else if (DISPLAY_VER(i915) == 9)
		skl_fbc_program_cfb_stride(fbc);

	/* only program the fence when the platform has fence registers */
	if (i915->ggtt.num_fences)
		snb_fbc_program_fence(fbc);

	intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
		       DPFC_CTL_EN | ivb_dpfc_ctl(fbc));
}

614
/* Report whether the ivb+ FBC hardware has compressed segments. */
static bool ivb_fbc_is_compressing(struct intel_fbc *fbc)
{
	return intel_de_read(fbc->i915, ILK_DPFC_STATUS2(fbc->id)) & DPFC_COMP_SEG_MASK_IVB;
}

/* Toggle the false-color debug bit in the DPFC control register. */
static void ivb_fbc_set_false_color(struct intel_fbc *fbc,
				    bool enable)
{
	intel_de_rmw(fbc->i915, ILK_DPFC_CONTROL(fbc->id),
		     DPFC_CTL_FALSE_COLOR, enable ? DPFC_CTL_FALSE_COLOR : 0);
}

/* FBC hooks for ivb+ hardware. */
static const struct intel_fbc_funcs ivb_fbc_funcs = {
	.activate = ivb_fbc_activate,
	.deactivate = ilk_fbc_deactivate,
	.is_active = ilk_fbc_is_active,
	.is_compressing = ivb_fbc_is_compressing,
	.nuke = snb_fbc_nuke,
	.program_cfb = ilk_fbc_program_cfb,
	.set_false_color = ivb_fbc_set_false_color,
};

636
/* Query the hardware enable state via the platform hook. */
static bool intel_fbc_hw_is_active(struct intel_fbc *fbc)
{
	return fbc->funcs->is_active(fbc);
}

/* Enable FBC in hardware and record the software active/activated state. */
static void intel_fbc_hw_activate(struct intel_fbc *fbc)
{
	trace_intel_fbc_activate(fbc->state.plane);

	fbc->active = true;
	fbc->activated = true;

	fbc->funcs->activate(fbc);
}

/* Disable FBC in hardware and clear the software active state. */
static void intel_fbc_hw_deactivate(struct intel_fbc *fbc)
{
	trace_intel_fbc_deactivate(fbc->state.plane);

	fbc->active = false;

	fbc->funcs->deactivate(fbc);
}

/* Query the compression status via the platform hook. */
static bool intel_fbc_is_compressing(struct intel_fbc *fbc)
{
	return fbc->funcs->is_compressing(fbc);
}

/* Invalidate the compressed data via the platform hook. */
static void intel_fbc_nuke(struct intel_fbc *fbc)
{
	trace_intel_fbc_nuke(fbc->state.plane);

	fbc->funcs->nuke(fbc);
}

/* Activate FBC, invalidate stale compressed data, clear the disable reason. */
static void intel_fbc_activate(struct intel_fbc *fbc)
{
	intel_fbc_hw_activate(fbc);
	intel_fbc_nuke(fbc);

	fbc->no_fbc_reason = NULL;
}

/* Deactivate FBC (if active) and remember why. Caller must hold fbc->lock. */
static void intel_fbc_deactivate(struct intel_fbc *fbc, const char *reason)
{
	struct drm_i915_private *i915 = fbc->i915;

	drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock));

	if (fbc->active)
		intel_fbc_hw_deactivate(fbc);

	fbc->no_fbc_reason = reason;
}

692 693
/* Highest CFB base address the FBC hardware can reach. */
static u64 intel_fbc_cfb_base_max(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915))
		return BIT_ULL(28);
	else
		return BIT_ULL(32);
}

V
Ville Syrjälä 已提交
700
/* Upper bound of the stolen-memory range usable for the CFB. */
static u64 intel_fbc_stolen_end(struct drm_i915_private *i915)
{
	u64 end;

	/* The FBC hardware for BDW/SKL doesn't have access to the stolen
	 * reserved range size, so it always assumes the maximum (8mb) is used.
	 * If we enable FBC using a CFB on that memory range we'll get FIFO
	 * underruns, even if that range is not reserved by the BIOS. */
	if (IS_BROADWELL(i915) ||
	    (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)))
		end = resource_size(&i915->dsm) - 8 * 1024 * 1024;
	else
		end = U64_MAX;

	return min(end, intel_fbc_cfb_base_max(i915));
}

717
static int intel_fbc_min_limit(const struct intel_plane_state *plane_state)
718
{
V
Ville Syrjälä 已提交
719
	return plane_state->hw.fb->format->cpp[0] == 2 ? 2 : 1;
720 721
}

V
Ville Syrjälä 已提交
722
/* Maximum compression limit we are willing to try. */
static int intel_fbc_max_limit(struct drm_i915_private *i915)
{
	/* WaFbcOnly1to1Ratio:ctg */
	if (IS_G4X(i915))
		return 1;

	/*
	 * FBC2 can only do 1:1, 1:2, 1:4, we limit
	 * FBC1 to the same out of convenience.
	 */
	return 4;
}

735
/*
 * Allocate the CFB from stolen memory, retrying at ever higher compression
 * limits (smaller CFB) until an allocation fits. Returns the chosen limit,
 * or 0 if nothing could be allocated.
 */
static int find_compression_limit(struct intel_fbc *fbc,
				  unsigned int size, int min_limit)
{
	struct drm_i915_private *i915 = fbc->i915;
	u64 end = intel_fbc_stolen_end(i915);
	int ret, limit = min_limit;

	size /= limit;

	/* Try to over-allocate to reduce reallocations and fragmentation. */
	ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
						   size <<= 1, 4096, 0, end);
	if (ret == 0)
		return limit;

	for (; limit <= intel_fbc_max_limit(i915); limit <<= 1) {
		ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
							   size >>= 1, 4096, 0, end);
		if (ret == 0)
			return limit;
	}

	return 0;
}

760
/*
 * Reserve stolen memory for the CFB (and the line length buffer on old
 * platforms). Stores the chosen compression limit in fbc->limit.
 * Returns 0 on success, -ENOSPC if stolen memory could not be reserved.
 */
static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
			       unsigned int size, int min_limit)
{
	struct drm_i915_private *i915 = fbc->i915;
	int ret;

	drm_WARN_ON(&i915->drm,
		    drm_mm_node_allocated(&fbc->compressed_fb));
	drm_WARN_ON(&i915->drm,
		    drm_mm_node_allocated(&fbc->compressed_llb));

	/* pre-ilk (except g4x) also needs the line length buffer */
	if (DISPLAY_VER(i915) < 5 && !IS_G4X(i915)) {
		ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb,
						  4096, 4096);
		if (ret)
			goto err;
	}

	ret = find_compression_limit(fbc, size, min_limit);
	if (!ret)
		goto err_llb;
	else if (ret > min_limit)
		drm_info_once(&i915->drm,
			      "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");

	fbc->limit = ret;

	drm_dbg_kms(&i915->drm,
		    "reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n",
		    fbc->compressed_fb.size, fbc->limit);

	return 0;

err_llb:
	if (drm_mm_node_allocated(&fbc->compressed_llb))
		i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
err:
	if (drm_mm_initialized(&i915->mm.stolen))
		drm_info_once(&i915->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}

802
/* Program the CFB base address via the platform hook. */
static void intel_fbc_program_cfb(struct intel_fbc *fbc)
{
	fbc->funcs->program_cfb(fbc);
}

807
/* Release the stolen-memory nodes backing the CFB/LLB. Needs fbc->lock. */
static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	/* freeing the CFB while the hardware still uses it would be a bug */
	if (WARN_ON(intel_fbc_hw_is_active(fbc)))
		return;

	if (drm_mm_node_allocated(&fbc->compressed_llb))
		i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
	if (drm_mm_node_allocated(&fbc->compressed_fb))
		i915_gem_stolen_remove_node(i915, &fbc->compressed_fb);
}

/* Tear down all FBC instances: release stolen memory and free the structs. */
void intel_fbc_cleanup(struct drm_i915_private *i915)
{
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;

	for_each_intel_fbc(i915, fbc, fbc_id) {
		mutex_lock(&fbc->lock);
		__intel_fbc_cleanup_cfb(fbc);
		mutex_unlock(&fbc->lock);

		kfree(fbc);
	}
}

834
/* Check the plane byte stride against the per-platform FBC restrictions. */
static bool stride_is_valid(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int stride = intel_fbc_plane_stride(plane_state) *
		fb->format->cpp[0];

	/* This should have been caught earlier. */
	if (drm_WARN_ON_ONCE(&i915->drm, (stride & (64 - 1)) != 0))
		return false;

	/* Below are the additional FBC restrictions. */
	if (stride < 512)
		return false;

	if (DISPLAY_VER(i915) == 2 || DISPLAY_VER(i915) == 3)
		return stride == 4096 || stride == 8192;

	if (DISPLAY_VER(i915) == 4 && !IS_G4X(i915) && stride < 2048)
		return false;

	/* Display WA #1105: skl,bxt,kbl,cfl,glk */
	if ((DISPLAY_VER(i915) == 9 || IS_GEMINILAKE(i915)) &&
	    fb->modifier == DRM_FORMAT_MOD_LINEAR && stride & 511)
		return false;

	if (stride > 16384)
		return false;

	return true;
}

866
/* Check whether the framebuffer pixel format is compressible on this hw. */
static bool pixel_format_is_valid(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	switch (fb->format->format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
		return true;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_RGB565:
		/* 16bpp not supported on gen2 */
		if (DISPLAY_VER(i915) == 2)
			return false;
		/* WaFbcOnly1to1Ratio:ctg */
		if (IS_G4X(i915))
			return false;
		return true;
	default:
		return false;
	}
}

889
/* Check whether the plane rotation is compatible with FBC on this hw. */
static bool rotation_is_valid(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;

	if (DISPLAY_VER(i915) >= 9 && fb->format->format == DRM_FORMAT_RGB565 &&
	    drm_rotation_90_or_270(rotation))
		return false;
	else if (DISPLAY_VER(i915) <= 4 && !IS_G4X(i915) &&
		 rotation != DRM_MODE_ROTATE_0)
		return false;

	return true;
}

905 906 907
/*
 * For some reason, the hardware tracking starts looking at whatever we
 * programmed as the display plane base address register. It does not look at
 * the X and Y offset registers. That's why we include the src x/y offsets
 * instead of just looking at the plane size.
 */
static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	unsigned int effective_w, effective_h, max_w, max_h;

	/* per-platform maximum surface size the hw tracker can cover */
	if (DISPLAY_VER(i915) >= 10) {
		max_w = 5120;
		max_h = 4096;
	} else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) {
		max_w = 4096;
		max_h = 4096;
	} else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) {
		max_w = 4096;
		max_h = 2048;
	} else {
		max_w = 2048;
		max_h = 1536;
	}

	effective_w = plane_state->view.color_plane[0].x +
		(drm_rect_width(&plane_state->uapi.src) >> 16);
	effective_h = plane_state->view.color_plane[0].y +
		(drm_rect_height(&plane_state->uapi.src) >> 16);

	return effective_w <= max_w && effective_h <= max_h;
}

938
/* Check whether the framebuffer tiling modifier works with FBC on this hw. */
static bool tiling_is_valid(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		return DISPLAY_VER(i915) >= 9;
	case I915_FORMAT_MOD_X_TILED:
		return true;
	default:
		return false;
	}
}

V
Ville Syrjälä 已提交
955 956 957
static void intel_fbc_update_state(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_plane *plane)
958
{
959 960 961 962 963 964
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	struct intel_fbc *fbc = plane->fbc;
V
Ville Syrjälä 已提交
965
	struct intel_fbc_state *fbc_state = &fbc->state;
966

V
Ville Syrjälä 已提交
967
	WARN_ON(plane_state->no_fbc_reason);
968

V
Ville Syrjälä 已提交
969
	fbc_state->plane = plane;
970

971
	/* FBC1 compression interval: arbitrary choice of 1 second */
V
Ville Syrjälä 已提交
972
	fbc_state->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode);
973

V
Ville Syrjälä 已提交
974
	fbc_state->fence_y_offset = intel_plane_fence_y_offset(plane_state);
975

V
Ville Syrjälä 已提交
976
	drm_WARN_ON(&i915->drm, plane_state->flags & PLANE_HAS_FENCE &&
977
		    !plane_state->ggtt_vma->fence);
978 979

	if (plane_state->flags & PLANE_HAS_FENCE &&
980
	    plane_state->ggtt_vma->fence)
V
Ville Syrjälä 已提交
981
		fbc_state->fence_id = plane_state->ggtt_vma->fence->id;
982
	else
V
Ville Syrjälä 已提交
983
		fbc_state->fence_id = -1;
984

V
Ville Syrjälä 已提交
985 986 987
	fbc_state->cfb_stride = intel_fbc_cfb_stride(plane_state);
	fbc_state->cfb_size = intel_fbc_cfb_size(plane_state);
	fbc_state->override_cfb_stride = intel_fbc_override_cfb_stride(plane_state);
988 989
}

V
Ville Syrjälä 已提交
990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025
static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);

	/* The use of a CPU fence is one of two ways to detect writes by the
	 * CPU to the scanout and trigger updates to the FBC.
	 *
	 * The other method is by software tracking (see
	 * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke
	 * the current compressed buffer and recompress it.
	 *
	 * Note that is possible for a tiled surface to be unmappable (and
	 * so have no fence associated with it) due to aperture constraints
	 * at the time of pinning.
	 *
	 * FIXME with 90/270 degree rotation we should use the fence on
	 * the normal GTT view (the rotated view doesn't even have a
	 * fence). Would need changes to the FBC fence Y offset as well.
	 * For now this will effectively disable FBC with 90/270 degree
	 * rotation.
	 */
	return DISPLAY_VER(i915) >= 9 ||
		(plane_state->flags & PLANE_HAS_FENCE &&
		 plane_state->ggtt_vma->fence);
}

static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct intel_fbc *fbc = plane->fbc;

	return intel_fbc_min_limit(plane_state) <= fbc->limit &&
		intel_fbc_cfb_size(plane_state) <= fbc->compressed_fb.size * fbc->limit;
}

static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state)
1026
{
V
Ville Syrjälä 已提交
1027 1028 1029
	return !plane_state->no_fbc_reason &&
		intel_fbc_is_fence_ok(plane_state) &&
		intel_fbc_is_cfb_ok(plane_state);
1030 1031
}

1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045
static int intel_fbc_check_plane(struct intel_atomic_state *state,
				 struct intel_plane *plane)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc);
	const struct intel_crtc_state *crtc_state;
	struct intel_fbc *fbc = plane->fbc;

	if (!fbc)
		return 0;

1046 1047 1048 1049 1050 1051 1052 1053 1054 1055
	if (intel_vgpu_active(i915)) {
		plane_state->no_fbc_reason = "VGPU active";
		return 0;
	}

	if (!i915->params.enable_fbc) {
		plane_state->no_fbc_reason = "disabled per module param or by default";
		return 0;
	}

1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067
	if (!plane_state->uapi.visible) {
		plane_state->no_fbc_reason = "plane not visible";
		return 0;
	}

	crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		plane_state->no_fbc_reason = "interlaced mode not supported";
		return 0;
	}

1068 1069 1070 1071 1072
	if (crtc_state->double_wide) {
		plane_state->no_fbc_reason = "double wide pipe not supported";
		return 0;
	}

1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150
	/*
	 * Display 12+ is not supporting FBC with PSR2.
	 * Recommendation is to keep this combination disabled
	 * Bspec: 50422 HSD: 14010260002
	 */
	if (DISPLAY_VER(i915) >= 12 && crtc_state->has_psr2) {
		plane_state->no_fbc_reason = "PSR2 enabled";
		return false;
	}

	if (!pixel_format_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "pixel format not supported";
		return 0;
	}

	if (!tiling_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "tiling not supported";
		return 0;
	}

	if (!rotation_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "rotation not supported";
		return 0;
	}

	if (!stride_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "stride not supported";
		return 0;
	}

	if (plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
	    fb->format->has_alpha) {
		plane_state->no_fbc_reason = "per-pixel alpha not supported";
		return false;
	}

	if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
		plane_state->no_fbc_reason = "plane size too big";
		return 0;
	}

	/*
	 * Work around a problem on GEN9+ HW, where enabling FBC on a plane
	 * having a Y offset that isn't divisible by 4 causes FIFO underrun
	 * and screen flicker.
	 */
	if (DISPLAY_VER(i915) >= 9 &&
	    plane_state->view.color_plane[0].y & 3) {
		plane_state->no_fbc_reason = "plane start Y offset misaligned";
		return false;
	}

	/* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
	if (DISPLAY_VER(i915) >= 11 &&
	    (plane_state->view.color_plane[0].y + drm_rect_height(&plane_state->uapi.src)) & 3) {
		plane_state->no_fbc_reason = "plane end Y offset misaligned";
		return false;
	}

	/* WaFbcExceedCdClockThreshold:hsw,bdw */
	if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		const struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		if (crtc_state->pixel_rate >= cdclk_state->logical.cdclk * 95 / 100) {
			plane_state->no_fbc_reason = "pixel rate too high";
			return 0;
		}
	}

	plane_state->no_fbc_reason = NULL;

	return 0;
}

1151

1152 1153 1154
static bool intel_fbc_can_flip_nuke(struct intel_atomic_state *state,
				    struct intel_crtc *crtc,
				    struct intel_plane *plane)
1155
{
1156 1157 1158 1159 1160 1161 1162 1163
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *old_plane_state =
		intel_atomic_get_old_plane_state(state, plane);
	const struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	const struct drm_framebuffer *old_fb = old_plane_state->hw.fb;
	const struct drm_framebuffer *new_fb = new_plane_state->hw.fb;
1164

1165
	if (drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi))
1166 1167
		return false;

V
Ville Syrjälä 已提交
1168 1169
	if (!intel_fbc_is_ok(old_plane_state) ||
	    !intel_fbc_is_ok(new_plane_state))
1170 1171 1172
		return false;

	if (old_fb->format->format != new_fb->format->format)
1173 1174
		return false;

1175
	if (old_fb->modifier != new_fb->modifier)
1176 1177
		return false;

1178 1179
	if (intel_fbc_plane_stride(old_plane_state) !=
	    intel_fbc_plane_stride(new_plane_state))
1180 1181
		return false;

V
Ville Syrjälä 已提交
1182 1183
	if (intel_fbc_cfb_stride(old_plane_state) !=
	    intel_fbc_cfb_stride(new_plane_state))
1184 1185
		return false;

V
Ville Syrjälä 已提交
1186 1187
	if (intel_fbc_cfb_size(old_plane_state) !=
	    intel_fbc_cfb_size(new_plane_state))
1188 1189
		return false;

V
Ville Syrjälä 已提交
1190 1191
	if (intel_fbc_override_cfb_stride(old_plane_state) !=
	    intel_fbc_override_cfb_stride(new_plane_state))
1192 1193 1194 1195 1196
		return false;

	return true;
}

1197 1198 1199
static bool __intel_fbc_pre_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_plane *plane)
1200
{
1201
	struct drm_i915_private *i915 = to_i915(state->base.dev);
1202
	struct intel_fbc *fbc = plane->fbc;
1203
	bool need_vblank_wait = false;
1204

1205
	fbc->flip_pending = true;
1206

1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227
	if (intel_fbc_can_flip_nuke(state, crtc, plane))
		return need_vblank_wait;

	intel_fbc_deactivate(fbc, "update pending");

	/*
	 * Display WA #1198: glk+
	 * Need an extra vblank wait between FBC disable and most plane
	 * updates. Bspec says this is only needed for plane disable, but
	 * that is not true. Touching most plane registers will cause the
	 * corruption to appear. Also SKL/derivatives do not seem to be
	 * affected.
	 *
	 * TODO: could optimize this a bit by sampling the frame
	 * counter when we disable FBC (if it was already done earlier)
	 * and skipping the extra vblank wait before the plane update
	 * if at least one frame has already passed.
	 */
	if (fbc->activated && DISPLAY_VER(i915) >= 10)
		need_vblank_wait = true;
	fbc->activated = false;
1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247

	return need_vblank_wait;
}

bool intel_fbc_pre_update(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	const struct intel_plane_state *plane_state;
	bool need_vblank_wait = false;
	struct intel_plane *plane;
	int i;

	/* Run the pre-update step for every FBC-capable plane on this pipe. */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_fbc *fbc = plane->fbc;

		if (!fbc || plane->pipe != crtc->pipe)
			continue;

		mutex_lock(&fbc->lock);

		/* Only the plane currently owning this FBC instance matters. */
		if (fbc->state.plane == plane)
			need_vblank_wait |= __intel_fbc_pre_update(state, crtc, plane);

		mutex_unlock(&fbc->lock);
	}

	return need_vblank_wait;
}

1257
static void __intel_fbc_disable(struct intel_fbc *fbc)
1258
{
1259
	struct drm_i915_private *i915 = fbc->i915;
V
Ville Syrjälä 已提交
1260
	struct intel_plane *plane = fbc->state.plane;
1261

V
Ville Syrjälä 已提交
1262 1263
	drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock));
	drm_WARN_ON(&i915->drm, fbc->active);
1264

1265 1266
	drm_dbg_kms(&i915->drm, "Disabling FBC on [PLANE:%d:%s]\n",
		    plane->base.base.id, plane->base.name);
1267

1268
	__intel_fbc_cleanup_cfb(fbc);
1269

V
Ville Syrjälä 已提交
1270
	fbc->state.plane = NULL;
1271 1272
}

1273
static void __intel_fbc_post_update(struct intel_fbc *fbc)
1274
{
1275
	struct drm_i915_private *i915 = fbc->i915;
1276

V
Ville Syrjälä 已提交
1277
	drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock));
1278

1279
	if (!fbc->busy_bits)
1280
		intel_fbc_activate(fbc);
1281
	else
1282
		intel_fbc_deactivate(fbc, "frontbuffer write");
P
Paulo Zanoni 已提交
1283 1284
}

1285 1286
void intel_fbc_post_update(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
P
Paulo Zanoni 已提交
1287
{
1288 1289 1290
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;
1291

1292 1293
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_fbc *fbc = plane->fbc;
1294

1295 1296 1297 1298 1299
		if (!fbc || plane->pipe != crtc->pipe)
			continue;

		mutex_lock(&fbc->lock);

V
Ville Syrjälä 已提交
1300
		if (fbc->state.plane == plane) {
1301 1302 1303 1304 1305
			fbc->flip_pending = false;
			__intel_fbc_post_update(fbc);
		}

		mutex_unlock(&fbc->lock);
1306
	}
1307 1308
}

1309 1310
static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
{
V
Ville Syrjälä 已提交
1311 1312
	if (fbc->state.plane)
		return fbc->state.plane->frontbuffer_bit;
1313 1314 1315 1316
	else
		return fbc->possible_framebuffer_bits;
}

1317 1318 1319
static void __intel_fbc_invalidate(struct intel_fbc *fbc,
				   unsigned int frontbuffer_bits,
				   enum fb_op_origin origin)
1320
{
1321
	if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
1322 1323
		return;

1324
	mutex_lock(&fbc->lock);
P
Paulo Zanoni 已提交
1325

1326
	fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;
1327

V
Ville Syrjälä 已提交
1328
	if (fbc->state.plane && fbc->busy_bits)
1329
		intel_fbc_deactivate(fbc, "frontbuffer write");
P
Paulo Zanoni 已提交
1330

1331
	mutex_unlock(&fbc->lock);
1332 1333
}

1334 1335 1336
void intel_fbc_invalidate(struct drm_i915_private *i915,
			  unsigned int frontbuffer_bits,
			  enum fb_op_origin origin)
1337
{
1338 1339
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;
1340

1341 1342 1343 1344
	for_each_intel_fbc(i915, fbc, fbc_id)
		__intel_fbc_invalidate(fbc, frontbuffer_bits, origin);

}
1345

1346 1347 1348 1349
static void __intel_fbc_flush(struct intel_fbc *fbc,
			      unsigned int frontbuffer_bits,
			      enum fb_op_origin origin)
{
1350
	mutex_lock(&fbc->lock);
1351

1352
	fbc->busy_bits &= ~frontbuffer_bits;
1353

1354
	if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
1355 1356
		goto out;

V
Ville Syrjälä 已提交
1357
	if (!fbc->busy_bits && fbc->state.plane &&
1358
	    (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
1359
		if (fbc->active)
1360
			intel_fbc_nuke(fbc);
1361
		else if (!fbc->flip_pending)
1362
			__intel_fbc_post_update(fbc);
1363
	}
P
Paulo Zanoni 已提交
1364

1365
out:
1366
	mutex_unlock(&fbc->lock);
1367 1368
}

1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379
void intel_fbc_flush(struct drm_i915_private *i915,
		     unsigned int frontbuffer_bits,
		     enum fb_op_origin origin)
{
	enum intel_fbc_id fbc_id;
	struct intel_fbc *fbc;

	/* Fan the flush out to every FBC instance on the device. */
	for_each_intel_fbc(i915, fbc, fbc_id)
		__intel_fbc_flush(fbc, frontbuffer_bits, origin);
}

1380
int intel_fbc_atomic_check(struct intel_atomic_state *state)
1381
{
1382
	struct intel_plane_state *plane_state;
1383
	struct intel_plane *plane;
1384
	int i;
1385

1386
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1387
		int ret;
1388

1389 1390 1391
		ret = intel_fbc_check_plane(state, plane);
		if (ret)
			return ret;
1392 1393
	}

1394
	return 0;
1395 1396
}

1397 1398 1399
static void __intel_fbc_enable(struct intel_atomic_state *state,
			       struct intel_crtc *crtc,
			       struct intel_plane *plane)
1400
{
1401
	struct drm_i915_private *i915 = to_i915(state->base.dev);
1402 1403
	const struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
1404
	struct intel_fbc *fbc = plane->fbc;
1405

V
Ville Syrjälä 已提交
1406 1407
	if (fbc->state.plane) {
		if (fbc->state.plane != plane)
1408
			return;
1409

V
Ville Syrjälä 已提交
1410
		if (intel_fbc_is_ok(plane_state))
1411
			return;
1412

1413
		__intel_fbc_disable(fbc);
1414
	}
1415

V
Ville Syrjälä 已提交
1416
	drm_WARN_ON(&i915->drm, fbc->active);
1417

V
Ville Syrjälä 已提交
1418 1419 1420
	fbc->no_fbc_reason = plane_state->no_fbc_reason;
	if (fbc->no_fbc_reason)
		return;
1421

V
Ville Syrjälä 已提交
1422 1423
	if (!intel_fbc_is_fence_ok(plane_state)) {
		fbc->no_fbc_reason = "framebuffer not fenced";
1424
		return;
V
Ville Syrjälä 已提交
1425
	}
1426

1427 1428 1429 1430 1431
	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "FIFO underrun";
		return;
	}

V
Ville Syrjälä 已提交
1432 1433
	if (intel_fbc_alloc_cfb(fbc, intel_fbc_cfb_size(plane_state),
				intel_fbc_min_limit(plane_state))) {
1434
		fbc->no_fbc_reason = "not enough stolen memory";
1435
		return;
1436 1437
	}

1438 1439
	drm_dbg_kms(&i915->drm, "Enabling FBC on [PLANE:%d:%s]\n",
		    plane->base.base.id, plane->base.name);
1440
	fbc->no_fbc_reason = "FBC enabled but not active yet\n";
1441

V
Ville Syrjälä 已提交
1442
	intel_fbc_update_state(state, crtc, plane);
1443

1444
	intel_fbc_program_cfb(fbc);
1445 1446 1447
}

/**
1448
 * intel_fbc_disable - disable FBC if it's associated with crtc
1449 1450 1451 1452
 * @crtc: the CRTC
 *
 * This function disables FBC if it's associated with the provided CRTC.
 */
1453
void intel_fbc_disable(struct intel_crtc *crtc)
1454
{
1455 1456
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_plane *plane;
1457

1458 1459
	for_each_intel_plane(&i915->drm, plane) {
		struct intel_fbc *fbc = plane->fbc;
1460

1461 1462 1463 1464
		if (!fbc || plane->pipe != crtc->pipe)
			continue;

		mutex_lock(&fbc->lock);
V
Ville Syrjälä 已提交
1465
		if (fbc->state.plane == plane)
1466 1467 1468
			__intel_fbc_disable(fbc);
		mutex_unlock(&fbc->lock);
	}
1469 1470
}

1471 1472 1473 1474 1475
void intel_fbc_update(struct intel_atomic_state *state,
		      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
1476 1477 1478
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;
1479

1480 1481
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_fbc *fbc = plane->fbc;
1482

1483 1484 1485 1486 1487 1488
		if (!fbc || plane->pipe != crtc->pipe)
			continue;

		mutex_lock(&fbc->lock);

		if (crtc_state->update_pipe && plane_state->no_fbc_reason) {
V
Ville Syrjälä 已提交
1489
			if (fbc->state.plane == plane)
1490 1491 1492 1493 1494 1495 1496
				__intel_fbc_disable(fbc);
		} else {
			__intel_fbc_enable(state, crtc, plane);
		}

		mutex_unlock(&fbc->lock);
	}
1497 1498
}

1499
/**
1500
 * intel_fbc_global_disable - globally disable FBC
V
Ville Syrjälä 已提交
1501
 * @i915: i915 device instance
1502 1503 1504
 *
 * This function disables FBC regardless of which CRTC is associated with it.
 */
V
Ville Syrjälä 已提交
1505
void intel_fbc_global_disable(struct drm_i915_private *i915)
1506
{
1507 1508
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;
1509

1510 1511 1512 1513 1514 1515
	for_each_intel_fbc(i915, fbc, fbc_id) {
		mutex_lock(&fbc->lock);
		if (fbc->state.plane)
			__intel_fbc_disable(fbc);
		mutex_unlock(&fbc->lock);
	}
1516 1517
}

1518 1519
static void intel_fbc_underrun_work_fn(struct work_struct *work)
{
1520 1521
	struct intel_fbc *fbc = container_of(work, typeof(*fbc), underrun_work);
	struct drm_i915_private *i915 = fbc->i915;
1522 1523 1524 1525

	mutex_lock(&fbc->lock);

	/* Maybe we were scheduled twice. */
V
Ville Syrjälä 已提交
1526
	if (fbc->underrun_detected || !fbc->state.plane)
1527 1528
		goto out;

V
Ville Syrjälä 已提交
1529
	drm_dbg_kms(&i915->drm, "Disabling FBC due to FIFO underrun.\n");
1530 1531
	fbc->underrun_detected = true;

1532
	intel_fbc_deactivate(fbc, "FIFO underrun");
1533
	if (!fbc->flip_pending)
V
Ville Syrjälä 已提交
1534
		intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(i915, fbc->state.plane->pipe));
1535
	__intel_fbc_disable(fbc);
1536 1537 1538 1539
out:
	mutex_unlock(&fbc->lock);
}

1540
static void __intel_fbc_reset_underrun(struct intel_fbc *fbc)
1541
{
1542
	struct drm_i915_private *i915 = fbc->i915;
1543

1544
	cancel_work_sync(&fbc->underrun_work);
1545

1546
	mutex_lock(&fbc->lock);
1547

1548
	if (fbc->underrun_detected) {
V
Ville Syrjälä 已提交
1549
		drm_dbg_kms(&i915->drm,
1550
			    "Re-allowing FBC after fifo underrun\n");
1551
		fbc->no_fbc_reason = "FIFO underrun cleared";
1552 1553
	}

1554 1555
	fbc->underrun_detected = false;
	mutex_unlock(&fbc->lock);
1556 1557
}

1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589
/*
 * intel_fbc_reset_underrun - reset FBC fifo underrun status.
 * @i915: the i915 device
 *
 * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
 * want to re-enable FBC after an underrun to increase test coverage.
 */
void intel_fbc_reset_underrun(struct drm_i915_private *i915)
{
	enum intel_fbc_id fbc_id;
	struct intel_fbc *fbc;

	for_each_intel_fbc(i915, fbc, fbc_id)
		__intel_fbc_reset_underrun(fbc);
}

static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
{
	/*
	 * There's no guarantee that underrun_detected won't be set to true
	 * right after this check and before the work is scheduled, but that's
	 * not a problem since we'll check it again under the work function
	 * while FBC is locked. This check here is just to prevent us from
	 * unnecessarily scheduling the work, and it relies on the fact that we
	 * never switch underrun_detect back to false after it's true.
	 */
	if (READ_ONCE(fbc->underrun_detected))
		return;

	schedule_work(&fbc->underrun_work);
}

1590 1591
/**
 * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
1592
 * @i915: i915 device
1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603
 *
 * Without FBC, most underruns are harmless and don't really cause too many
 * problems, except for an annoying message on dmesg. With FBC, underruns can
 * become black screens or even worse, especially when paired with bad
 * watermarks. So in order for us to be on the safe side, completely disable FBC
 * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
 * already suggests that watermarks may be bad, so try to be as safe as
 * possible.
 *
 * This function is called from the IRQ handler.
 */
1604
void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915)
1605
{
1606 1607
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;
1608

1609 1610
	for_each_intel_fbc(i915, fbc, fbc_id)
		__intel_fbc_handle_fifo_underrun_irq(fbc);
1611 1612
}

1613 1614 1615 1616 1617 1618 1619 1620 1621
/*
 * The DDX driver changes its behavior depending on the value it reads from
 * i915.enable_fbc, so sanitize it by translating the default value into either
 * 0 or 1 in order to allow it to know what's going on.
 *
 * Notice that this is done at driver initialization and we still allow user
 * space to change the value during runtime without sanitizing it again. IGT
 * relies on being able to change i915.enable_fbc at runtime.
 */
V
Ville Syrjälä 已提交
1622
static int intel_sanitize_fbc_option(struct drm_i915_private *i915)
1623
{
V
Ville Syrjälä 已提交
1624 1625
	if (i915->params.enable_fbc >= 0)
		return !!i915->params.enable_fbc;
1626

V
Ville Syrjälä 已提交
1627
	if (!HAS_FBC(i915))
1628 1629
		return 0;

V
Ville Syrjälä 已提交
1630
	if (IS_BROADWELL(i915) || DISPLAY_VER(i915) >= 9)
1631 1632 1633 1634 1635
		return 1;

	return 0;
}

V
Ville Syrjälä 已提交
1636
static bool need_fbc_vtd_wa(struct drm_i915_private *i915)
1637 1638
{
	/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
1639
	if (intel_vtd_active() &&
V
Ville Syrjälä 已提交
1640 1641
	    (IS_SKYLAKE(i915) || IS_BROXTON(i915))) {
		drm_info(&i915->drm,
1642
			 "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
1643 1644 1645 1646 1647 1648
		return true;
	}

	return false;
}

1649 1650 1651 1652 1653 1654 1655 1656 1657
/* Associate @plane with @fbc and track its frontbuffer bit; NULL fbc is a no-op. */
void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane)
{
	if (!fbc)
		return;

	plane->fbc = fbc;
	fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
}

1658 1659
static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915,
					  enum intel_fbc_id fbc_id)
1660 1661 1662 1663 1664 1665 1666
{
	struct intel_fbc *fbc;

	fbc = kzalloc(sizeof(*fbc), GFP_KERNEL);
	if (!fbc)
		return NULL;

1667
	fbc->id = fbc_id;
1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687
	fbc->i915 = i915;
	INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
	mutex_init(&fbc->lock);

	if (DISPLAY_VER(i915) >= 7)
		fbc->funcs = &ivb_fbc_funcs;
	else if (DISPLAY_VER(i915) == 6)
		fbc->funcs = &snb_fbc_funcs;
	else if (DISPLAY_VER(i915) == 5)
		fbc->funcs = &ilk_fbc_funcs;
	else if (IS_G4X(i915))
		fbc->funcs = &g4x_fbc_funcs;
	else if (DISPLAY_VER(i915) == 4)
		fbc->funcs = &i965_fbc_funcs;
	else
		fbc->funcs = &i8xx_fbc_funcs;

	return fbc;
}

R
Rodrigo Vivi 已提交
1688 1689
/**
 * intel_fbc_init - Initialize FBC
V
Ville Syrjälä 已提交
1690
 * @i915: the i915 device
R
Rodrigo Vivi 已提交
1691 1692 1693
 *
 * This function might be called during PM init process.
 */
V
Ville Syrjälä 已提交
1694
void intel_fbc_init(struct drm_i915_private *i915)
1695
{
1696
	struct intel_fbc *fbc;
P
Paulo Zanoni 已提交
1697

V
Ville Syrjälä 已提交
1698 1699
	if (!drm_mm_initialized(&i915->mm.stolen))
		mkwrite_device_info(i915)->display.has_fbc = false;
1700

V
Ville Syrjälä 已提交
1701 1702
	if (need_fbc_vtd_wa(i915))
		mkwrite_device_info(i915)->display.has_fbc = false;
1703

V
Ville Syrjälä 已提交
1704 1705 1706
	i915->params.enable_fbc = intel_sanitize_fbc_option(i915);
	drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n",
		    i915->params.enable_fbc);
1707

1708
	if (!HAS_FBC(i915))
1709 1710
		return;

1711
	fbc = intel_fbc_create(i915, INTEL_FBC_A);
1712 1713
	if (!fbc)
		return;
1714

1715
	/* We still don't have any sort of hardware state readout for FBC, so
1716 1717
	 * deactivate it in case the BIOS activated it to make sure software
	 * matches the hardware state. */
1718 1719
	if (intel_fbc_hw_is_active(fbc))
		intel_fbc_hw_deactivate(fbc);
1720

1721
	i915->fbc[fbc->id] = fbc;
1722
}
1723 1724 1725 1726 1727

static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
{
	struct intel_fbc *fbc = m->private;
	struct drm_i915_private *i915 = fbc->i915;
1728
	struct intel_plane *plane;
1729 1730
	intel_wakeref_t wakeref;

1731 1732
	drm_modeset_lock_all(&i915->drm);

1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	mutex_lock(&fbc->lock);

	if (fbc->active) {
		seq_puts(m, "FBC enabled\n");
		seq_printf(m, "Compressing: %s\n",
			   yesno(intel_fbc_is_compressing(fbc)));
	} else {
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
	}

1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756
	for_each_intel_plane(&i915->drm, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane->fbc != fbc)
			continue;

		seq_printf(m, "%c [PLANE:%d:%s]: %s\n",
			   fbc->state.plane == plane ? '*' : ' ',
			   plane->base.base.id, plane->base.name,
			   plane_state->no_fbc_reason ?: "FBC possible");
	}

1757 1758 1759
	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

1760 1761
	drm_modeset_unlock_all(&i915->drm);

1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813
	return 0;
}

/* Generates intel_fbc_debugfs_status_fops wrapping the _show() above. */
DEFINE_SHOW_ATTRIBUTE(intel_fbc_debugfs_status);

/* debugfs read: current false-color setting. */
static int intel_fbc_debugfs_false_color_get(void *data, u64 *val)
{
	struct intel_fbc *fbc = data;

	*val = fbc->false_color;

	return 0;
}

/* debugfs write: update false-color and push it to HW if FBC is active. */
static int intel_fbc_debugfs_false_color_set(void *data, u64 val)
{
	struct intel_fbc *fbc = data;

	mutex_lock(&fbc->lock);

	fbc->false_color = val;

	if (fbc->active)
		fbc->funcs->set_false_color(fbc, fbc->false_color);

	mutex_unlock(&fbc->lock);

	return 0;
}

/* u64 get/set attribute for the i915_fbc_false_color debugfs file. */
DEFINE_SIMPLE_ATTRIBUTE(intel_fbc_debugfs_false_color_fops,
			intel_fbc_debugfs_false_color_get,
			intel_fbc_debugfs_false_color_set,
			"%llu\n");

/* Create the debugfs files for one FBC instance. */
static void intel_fbc_debugfs_add(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;
	struct drm_minor *minor = i915->drm.primary;

	debugfs_create_file("i915_fbc_status", 0444,
			    minor->debugfs_root, fbc,
			    &intel_fbc_debugfs_status_fops);

	/* false color is optional; only expose it when the HW vfunc exists */
	if (fbc->funcs->set_false_color)
		debugfs_create_file("i915_fbc_false_color", 0644,
				    minor->debugfs_root, fbc,
				    &intel_fbc_debugfs_false_color_fops);
}

void intel_fbc_debugfs_register(struct drm_i915_private *i915)
{
1814
	struct intel_fbc *fbc = i915->fbc[INTEL_FBC_A];
1815

1816
	if (fbc)
1817 1818
		intel_fbc_debugfs_add(fbc);
}