/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Frame Buffer Compression (FBC)
 *
 * FBC tries to save memory bandwidth (and so power consumption) by
 * compressing the amount of memory used by the display. It is totally
 * transparent to user space and completely handled in the kernel.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns. They come from keeping the memory footprint small
 * and having fewer memory pages opened and accessed for refreshing the display.
 *
 * i915 is responsible for reserving stolen memory for FBC and configuring its
 * offset in the proper registers. The hardware takes care of all the
 * compression/decompression. However, there are many known cases where we have
 * to forcibly disable it to allow proper screen updates.
 */

#include <linux/string_helpers.h>

#include <drm/drm_fourcc.h>

#include "i915_drv.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"

#define for_each_fbc_id(__dev_priv, __fbc_id) \
	for ((__fbc_id) = INTEL_FBC_A; (__fbc_id) < I915_MAX_FBCS; (__fbc_id)++) \
		for_each_if(INTEL_INFO(__dev_priv)->display.fbc_mask & BIT(__fbc_id))

#define for_each_intel_fbc(__dev_priv, __fbc, __fbc_id) \
	for_each_fbc_id((__dev_priv), (__fbc_id)) \
		for_each_if((__fbc) = (__dev_priv)->fbc[(__fbc_id)])
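/*
 * Illustrative usage of the iterators above (this mirrors what
 * intel_fbc_cleanup() and friends further down in this file do):
 *
 *	struct intel_fbc *fbc;
 *	enum intel_fbc_id fbc_id;
 *
 *	for_each_intel_fbc(i915, fbc, fbc_id)
 *		do_something(fbc);
 *
 * for_each_fbc_id() walks every FBC instance id the platform advertises in
 * its fbc_mask, while for_each_intel_fbc() additionally skips ids without an
 * allocated struct intel_fbc. do_something() is only a placeholder here, not
 * a real helper.
 */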

struct intel_fbc_funcs {
	void (*activate)(struct intel_fbc *fbc);
	void (*deactivate)(struct intel_fbc *fbc);
	bool (*is_active)(struct intel_fbc *fbc);
	bool (*is_compressing)(struct intel_fbc *fbc);
	void (*nuke)(struct intel_fbc *fbc);
	void (*program_cfb)(struct intel_fbc *fbc);
	void (*set_false_color)(struct intel_fbc *fbc, bool enable);
};

struct intel_fbc_state {
	struct intel_plane *plane;
	unsigned int cfb_stride;
	unsigned int cfb_size;
	unsigned int fence_y_offset;
	u16 override_cfb_stride;
	u16 interval;
	s8 fence_id;
};

struct intel_fbc {
	struct drm_i915_private *i915;
	const struct intel_fbc_funcs *funcs;

	/*
	 * This is always the inner lock when overlapping with
	 * struct_mutex and it's the outer lock when overlapping
	 * with stolen_lock.
	 */
	struct mutex lock;
	unsigned int busy_bits;

	struct drm_mm_node compressed_fb;
	struct drm_mm_node compressed_llb;

	enum intel_fbc_id id;

	u8 limit;

	bool false_color;

	bool active;
	bool activated;
	bool flip_pending;

	bool underrun_detected;
	struct work_struct underrun_work;

	/*
	 * This structure contains everything that's relevant to program the
	 * hardware registers. When we want to figure out if we need to disable
	 * and re-enable FBC for a new configuration we just check if there's
	 * something different in the struct. The genx_fbc_activate functions
	 * are supposed to read from it in order to program the registers.
	 */
	struct intel_fbc_state state;
	const char *no_fbc_reason;
};

/* plane stride in pixels */
static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int stride;

	stride = plane_state->view.color_plane[0].mapping_stride;
	if (!drm_rotation_90_or_270(plane_state->hw.rotation))
		stride /= fb->format->cpp[0];

	return stride;
}

/* plane stride based cfb stride in bytes, assuming 1:1 compression limit */
static unsigned int _intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
{
	unsigned int cpp = 4; /* FBC always 4 bytes per pixel */

	return intel_fbc_plane_stride(plane_state) * cpp;
}

/* minimum acceptable cfb stride in bytes, assuming 1:1 compression limit */
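/*
 * Worked example for skl_fbc_min_cfb_stride() below, with illustrative
 * numbers (not taken from bspec): a 4096 pixel wide plane at 4 bytes per
 * pixel, with the worst case 1:4 limit and a 4 line segment, needs a segment
 * stride of 4096 * 4 * 4 / 4 = 16384 bytes; icl+ adds 64 bytes
 * (Wa_16011863758) giving 16448, which aligns up to 16896, and converting
 * back to a 1:1 single line equivalent gives 16896 * 4 / 4 = 16896 bytes.
 */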
static unsigned int skl_fbc_min_cfb_stride(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	unsigned int limit = 4; /* 1:4 compression limit is the worst case */
	unsigned int cpp = 4; /* FBC always 4 bytes per pixel */
	unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16;
	unsigned int height = 4; /* FBC segment is 4 lines */
	unsigned int stride;

	/* minimum segment stride we can use */
	stride = width * cpp * height / limit;

	/*
	 * Wa_16011863758: icl+
	 * Avoid some hardware segment address miscalculation.
	 */
	if (DISPLAY_VER(i915) >= 11)
		stride += 64;

	/*
	 * At least some of the platforms require each 4 line segment to
	 * be 512 byte aligned. Just do it always for simplicity.
	 */
	stride = ALIGN(stride, 512);

	/* convert back to single line equivalent with 1:1 compression limit */
	return stride * limit / height;
}

/* properly aligned cfb stride in bytes, assuming 1:1 compression limit */
static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	unsigned int stride = _intel_fbc_cfb_stride(plane_state);

	/*
	 * At least some of the platforms require each 4 line segment to
	 * be 512 byte aligned. Aligning each line to 512 bytes guarantees
	 * that regardless of the compression limit we choose later.
	 */
	if (DISPLAY_VER(i915) >= 9)
		return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(plane_state));
	else
		return stride;
}

static unsigned int intel_fbc_cfb_size(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	int lines = drm_rect_height(&plane_state->uapi.src) >> 16;

	if (DISPLAY_VER(i915) == 7)
		lines = min(lines, 2048);
	else if (DISPLAY_VER(i915) >= 8)
		lines = min(lines, 2560);

	return lines * intel_fbc_cfb_stride(plane_state);
}
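/*
 * Illustrative example for intel_fbc_cfb_size() above: a plane with an
 * assumed 2160 visible lines on DISPLAY_VER >= 8 stays below the 2560 line
 * cap, so the CFB size is simply 2160 * intel_fbc_cfb_stride(). The line
 * count is only an example value; the caps come from the code above.
 */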

static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	unsigned int stride_aligned = intel_fbc_cfb_stride(plane_state);
	unsigned int stride = _intel_fbc_cfb_stride(plane_state);
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	/*
	 * Override stride in 64 byte units per 4 line segment.
	 *
	 * Gen9 hw miscalculates cfb stride for linear as
	 * PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so
	 * we always need to use the override there.
	 */
	if (stride != stride_aligned ||
	    (DISPLAY_VER(i915) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR))
		return stride_aligned * 4 / 64;

	return 0;
}
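/*
 * Illustrative example for intel_fbc_override_cfb_stride() above: an assumed
 * 512 byte aligned cfb stride of 16896 bytes translates to
 * 16896 * 4 / 64 = 1056, i.e. the override is expressed in 64 byte units per
 * 4 line segment. The 16896 figure is just an example value, not from bspec.
 */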

static u32 i8xx_fbc_ctl(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	unsigned int cfb_stride;
	u32 fbc_ctl;

	cfb_stride = fbc_state->cfb_stride / fbc->limit;

	/* FBC_CTL wants 32B or 64B units */
	if (DISPLAY_VER(i915) == 2)
		cfb_stride = (cfb_stride / 32) - 1;
	else
		cfb_stride = (cfb_stride / 64) - 1;

	fbc_ctl = FBC_CTL_PERIODIC |
		FBC_CTL_INTERVAL(fbc_state->interval) |
		FBC_CTL_STRIDE(cfb_stride);

	if (IS_I945GM(i915))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */

	if (fbc_state->fence_id >= 0)
		fbc_ctl |= FBC_CTL_FENCENO(fbc_state->fence_id);

	return fbc_ctl;
}
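/*
 * Illustrative example for i8xx_fbc_ctl() above: with an assumed cfb stride
 * of 2048 bytes and a 1:1 limit, the FBC_CTL stride field becomes
 * 2048 / 32 - 1 = 63 on gen2 (32B units) and 2048 / 64 - 1 = 31 on gen3/4
 * (64B units).
 */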

static u32 i965_fbc_ctl2(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	u32 fbc_ctl2;

	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM |
		FBC_CTL_PLANE(fbc_state->plane->i9xx_plane);

	if (fbc_state->fence_id >= 0)
		fbc_ctl2 |= FBC_CTL_CPU_FENCE_EN;

	return fbc_ctl2;
}

static void i8xx_fbc_deactivate(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = intel_de_read(i915, FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	intel_de_write(i915, FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (intel_de_wait_for_clear(i915, FBC_STATUS,
				    FBC_STAT_COMPRESSING, 10)) {
		drm_dbg_kms(&i915->drm, "FBC idle timed out\n");
		return;
	}
}

static void i8xx_fbc_activate(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	int i;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		intel_de_write(i915, FBC_TAG(i), 0);

	if (DISPLAY_VER(i915) == 4) {
		intel_de_write(i915, FBC_CONTROL2,
			       i965_fbc_ctl2(fbc));
		intel_de_write(i915, FBC_FENCE_OFF,
			       fbc_state->fence_y_offset);
	}

	intel_de_write(i915, FBC_CONTROL,
		       FBC_CTL_EN | i8xx_fbc_ctl(fbc));
}

static bool i8xx_fbc_is_active(struct intel_fbc *fbc)
{
	return intel_de_read(fbc->i915, FBC_CONTROL) & FBC_CTL_EN;
}

static bool i8xx_fbc_is_compressing(struct intel_fbc *fbc)
{
	return intel_de_read(fbc->i915, FBC_STATUS) &
		(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
}

static void i8xx_fbc_nuke(struct intel_fbc *fbc)
{
	struct intel_fbc_state *fbc_state = &fbc->state;
	enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
	struct drm_i915_private *dev_priv = fbc->i915;

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
			  intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane)));
	spin_unlock_irq(&dev_priv->uncore.lock);
}

static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start,
					 fbc->compressed_fb.start, U32_MAX));
	GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start,
					 fbc->compressed_llb.start, U32_MAX));

	intel_de_write(i915, FBC_CFB_BASE,
		       i915->dsm.start + fbc->compressed_fb.start);
	intel_de_write(i915, FBC_LL_BASE,
		       i915->dsm.start + fbc->compressed_llb.start);
}

static const struct intel_fbc_funcs i8xx_fbc_funcs = {
	.activate = i8xx_fbc_activate,
	.deactivate = i8xx_fbc_deactivate,
	.is_active = i8xx_fbc_is_active,
	.is_compressing = i8xx_fbc_is_compressing,
	.nuke = i8xx_fbc_nuke,
	.program_cfb = i8xx_fbc_program_cfb,
};

static void i965_fbc_nuke(struct intel_fbc *fbc)
{
	struct intel_fbc_state *fbc_state = &fbc->state;
	enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
	struct drm_i915_private *dev_priv = fbc->i915;

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
			  intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane)));
	spin_unlock_irq(&dev_priv->uncore.lock);
}

static const struct intel_fbc_funcs i965_fbc_funcs = {
	.activate = i8xx_fbc_activate,
	.deactivate = i8xx_fbc_deactivate,
	.is_active = i8xx_fbc_is_active,
	.is_compressing = i8xx_fbc_is_compressing,
	.nuke = i965_fbc_nuke,
	.program_cfb = i8xx_fbc_program_cfb,
};

static u32 g4x_dpfc_ctl_limit(struct intel_fbc *fbc)
{
	switch (fbc->limit) {
	default:
		MISSING_CASE(fbc->limit);
		fallthrough;
	case 1:
		return DPFC_CTL_LIMIT_1X;
	case 2:
		return DPFC_CTL_LIMIT_2X;
	case 4:
		return DPFC_CTL_LIMIT_4X;
	}
}

static u32 g4x_dpfc_ctl(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	u32 dpfc_ctl;

	dpfc_ctl = g4x_dpfc_ctl_limit(fbc) |
		DPFC_CTL_PLANE_G4X(fbc_state->plane->i9xx_plane);

	if (IS_G4X(i915))
		dpfc_ctl |= DPFC_CTL_SR_EN;

	if (fbc_state->fence_id >= 0) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN_G4X;

		if (DISPLAY_VER(i915) < 6)
			dpfc_ctl |= DPFC_CTL_FENCENO(fbc_state->fence_id);
	}

	return dpfc_ctl;
}

static void g4x_fbc_activate(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;

	intel_de_write(i915, DPFC_FENCE_YOFF,
		       fbc_state->fence_y_offset);

	intel_de_write(i915, DPFC_CONTROL,
		       DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
}

static void g4x_fbc_deactivate(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = intel_de_read(i915, DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		intel_de_write(i915, DPFC_CONTROL, dpfc_ctl);
	}
}

static bool g4x_fbc_is_active(struct intel_fbc *fbc)
{
	return intel_de_read(fbc->i915, DPFC_CONTROL) & DPFC_CTL_EN;
}

static bool g4x_fbc_is_compressing(struct intel_fbc *fbc)
{
	return intel_de_read(fbc->i915, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
}

static void g4x_fbc_program_cfb(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	intel_de_write(i915, DPFC_CB_BASE, fbc->compressed_fb.start);
}

static const struct intel_fbc_funcs g4x_fbc_funcs = {
	.activate = g4x_fbc_activate,
	.deactivate = g4x_fbc_deactivate,
	.is_active = g4x_fbc_is_active,
	.is_compressing = g4x_fbc_is_compressing,
	.nuke = i965_fbc_nuke,
	.program_cfb = g4x_fbc_program_cfb,
};

static void ilk_fbc_activate(struct intel_fbc *fbc)
{
	struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;

	intel_de_write(i915, ILK_DPFC_FENCE_YOFF(fbc->id),
		       fbc_state->fence_y_offset);

	intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
		       DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
}

static void ilk_fbc_deactivate(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = intel_de_read(i915, ILK_DPFC_CONTROL(fbc->id));
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
	}
}

static bool ilk_fbc_is_active(struct intel_fbc *fbc)
{
	return intel_de_read(fbc->i915, ILK_DPFC_CONTROL(fbc->id)) & DPFC_CTL_EN;
}

static bool ilk_fbc_is_compressing(struct intel_fbc *fbc)
{
	return intel_de_read(fbc->i915, ILK_DPFC_STATUS(fbc->id)) & DPFC_COMP_SEG_MASK;
}

static void ilk_fbc_program_cfb(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	intel_de_write(i915, ILK_DPFC_CB_BASE(fbc->id), fbc->compressed_fb.start);
}

static const struct intel_fbc_funcs ilk_fbc_funcs = {
	.activate = ilk_fbc_activate,
	.deactivate = ilk_fbc_deactivate,
	.is_active = ilk_fbc_is_active,
	.is_compressing = ilk_fbc_is_compressing,
	.nuke = i965_fbc_nuke,
	.program_cfb = ilk_fbc_program_cfb,
};

static void snb_fbc_program_fence(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	u32 ctl = 0;

	if (fbc_state->fence_id >= 0)
		ctl = SNB_DPFC_FENCE_EN | SNB_DPFC_FENCENO(fbc_state->fence_id);

	intel_de_write(i915, SNB_DPFC_CTL_SA, ctl);
	intel_de_write(i915, SNB_DPFC_CPU_FENCE_OFFSET, fbc_state->fence_y_offset);
}

static void snb_fbc_activate(struct intel_fbc *fbc)
{
	snb_fbc_program_fence(fbc);

	ilk_fbc_activate(fbc);
}

static void snb_fbc_nuke(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	intel_de_write(i915, MSG_FBC_REND_STATE(fbc->id), FBC_REND_NUKE);
	intel_de_posting_read(i915, MSG_FBC_REND_STATE(fbc->id));
}

static const struct intel_fbc_funcs snb_fbc_funcs = {
	.activate = snb_fbc_activate,
	.deactivate = ilk_fbc_deactivate,
	.is_active = ilk_fbc_is_active,
	.is_compressing = ilk_fbc_is_compressing,
	.nuke = snb_fbc_nuke,
	.program_cfb = ilk_fbc_program_cfb,
};

static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	u32 val = 0;

	if (fbc_state->override_cfb_stride)
		val |= FBC_STRIDE_OVERRIDE |
			FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);

	intel_de_write(i915, GLK_FBC_STRIDE(fbc->id), val);
}

static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	u32 val = 0;

	/* Display WA #0529: skl, kbl, bxt. */
	if (fbc_state->override_cfb_stride)
		val |= CHICKEN_FBC_STRIDE_OVERRIDE |
			CHICKEN_FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);

	intel_de_rmw(i915, CHICKEN_MISC_4,
		     CHICKEN_FBC_STRIDE_OVERRIDE |
		     CHICKEN_FBC_STRIDE_MASK, val);
}

static u32 ivb_dpfc_ctl(struct intel_fbc *fbc)
{
	const struct intel_fbc_state *fbc_state = &fbc->state;
	struct drm_i915_private *i915 = fbc->i915;
	u32 dpfc_ctl;

	dpfc_ctl = g4x_dpfc_ctl_limit(fbc);

	if (IS_IVYBRIDGE(i915))
		dpfc_ctl |= DPFC_CTL_PLANE_IVB(fbc_state->plane->i9xx_plane);

	if (fbc_state->fence_id >= 0)
		dpfc_ctl |= DPFC_CTL_FENCE_EN_IVB;

	if (fbc->false_color)
		dpfc_ctl |= DPFC_CTL_FALSE_COLOR;

	return dpfc_ctl;
}

static void ivb_fbc_activate(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	if (DISPLAY_VER(i915) >= 10)
		glk_fbc_program_cfb_stride(fbc);
	else if (DISPLAY_VER(i915) == 9)
		skl_fbc_program_cfb_stride(fbc);

	if (to_gt(i915)->ggtt->num_fences)
		snb_fbc_program_fence(fbc);

	intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
		       DPFC_CTL_EN | ivb_dpfc_ctl(fbc));
}

static bool ivb_fbc_is_compressing(struct intel_fbc *fbc)
{
	return intel_de_read(fbc->i915, ILK_DPFC_STATUS2(fbc->id)) & DPFC_COMP_SEG_MASK_IVB;
}

static void ivb_fbc_set_false_color(struct intel_fbc *fbc,
				    bool enable)
{
	intel_de_rmw(fbc->i915, ILK_DPFC_CONTROL(fbc->id),
		     DPFC_CTL_FALSE_COLOR, enable ? DPFC_CTL_FALSE_COLOR : 0);
}

static const struct intel_fbc_funcs ivb_fbc_funcs = {
	.activate = ivb_fbc_activate,
	.deactivate = ilk_fbc_deactivate,
	.is_active = ilk_fbc_is_active,
	.is_compressing = ivb_fbc_is_compressing,
	.nuke = snb_fbc_nuke,
	.program_cfb = ilk_fbc_program_cfb,
	.set_false_color = ivb_fbc_set_false_color,
};

static bool intel_fbc_hw_is_active(struct intel_fbc *fbc)
{
	return fbc->funcs->is_active(fbc);
}

static void intel_fbc_hw_activate(struct intel_fbc *fbc)
{
	trace_intel_fbc_activate(fbc->state.plane);

	fbc->active = true;
	fbc->activated = true;

	fbc->funcs->activate(fbc);
}

static void intel_fbc_hw_deactivate(struct intel_fbc *fbc)
{
	trace_intel_fbc_deactivate(fbc->state.plane);

	fbc->active = false;

	fbc->funcs->deactivate(fbc);
}

static bool intel_fbc_is_compressing(struct intel_fbc *fbc)
{
	return fbc->funcs->is_compressing(fbc);
}

static void intel_fbc_nuke(struct intel_fbc *fbc)
{
	trace_intel_fbc_nuke(fbc->state.plane);

	fbc->funcs->nuke(fbc);
}

static void intel_fbc_activate(struct intel_fbc *fbc)
{
	intel_fbc_hw_activate(fbc);
	intel_fbc_nuke(fbc);

	fbc->no_fbc_reason = NULL;
}

static void intel_fbc_deactivate(struct intel_fbc *fbc, const char *reason)
{
	struct drm_i915_private *i915 = fbc->i915;

	drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock));

	if (fbc->active)
		intel_fbc_hw_deactivate(fbc);

	fbc->no_fbc_reason = reason;
}

static u64 intel_fbc_cfb_base_max(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915))
		return BIT_ULL(28);
	else
		return BIT_ULL(32);
}

static u64 intel_fbc_stolen_end(struct drm_i915_private *i915)
{
	u64 end;

	/*
	 * The FBC hardware for BDW/SKL doesn't have access to the stolen
	 * reserved range size, so it always assumes the maximum (8mb) is used.
	 * If we enable FBC using a CFB on that memory range we'll get FIFO
	 * underruns, even if that range is not reserved by the BIOS.
	 */
	if (IS_BROADWELL(i915) ||
	    (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)))
		end = resource_size(&i915->dsm) - 8 * 1024 * 1024;
	else
		end = U64_MAX;

	return min(end, intel_fbc_cfb_base_max(i915));
}

static int intel_fbc_min_limit(const struct intel_plane_state *plane_state)
{
	return plane_state->hw.fb->format->cpp[0] == 2 ? 2 : 1;
}

static int intel_fbc_max_limit(struct drm_i915_private *i915)
{
	/* WaFbcOnly1to1Ratio:ctg */
	if (IS_G4X(i915))
		return 1;

	/*
	 * FBC2 can only do 1:1, 1:2, 1:4, we limit
	 * FBC1 to the same out of convenience.
	 */
	return 4;
}

static int find_compression_limit(struct intel_fbc *fbc,
				  unsigned int size, int min_limit)
{
	struct drm_i915_private *i915 = fbc->i915;
	u64 end = intel_fbc_stolen_end(i915);
	int ret, limit = min_limit;

	size /= limit;

	/* Try to over-allocate to reduce reallocations and fragmentation. */
	ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
						   size <<= 1, 4096, 0, end);
	if (ret == 0)
		return limit;

	for (; limit <= intel_fbc_max_limit(i915); limit <<= 1) {
		ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
							   size >>= 1, 4096, 0, end);
		if (ret == 0)
			return limit;
	}

	return 0;
}
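/*
 * Illustrative walk-through of find_compression_limit() above, assuming an
 * 8 MiB cfb size request and min_limit == 1: the first attempt tries to
 * over-allocate 16 MiB; if that fails the loop retries with 8 MiB at limit 1,
 * 4 MiB at limit 2 and 2 MiB at limit 4, returning the first limit whose
 * allocation succeeds, or 0 if even the smallest one does not fit in stolen.
 */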

static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
			       unsigned int size, int min_limit)
{
	struct drm_i915_private *i915 = fbc->i915;
	int ret;

	drm_WARN_ON(&i915->drm,
		    drm_mm_node_allocated(&fbc->compressed_fb));
	drm_WARN_ON(&i915->drm,
		    drm_mm_node_allocated(&fbc->compressed_llb));

	if (DISPLAY_VER(i915) < 5 && !IS_G4X(i915)) {
		ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb,
						  4096, 4096);
		if (ret)
			goto err;
	}

	ret = find_compression_limit(fbc, size, min_limit);
	if (!ret)
		goto err_llb;
	else if (ret > min_limit)
		drm_info_once(&i915->drm,
			      "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");

	fbc->limit = ret;

	drm_dbg_kms(&i915->drm,
		    "reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n",
		    fbc->compressed_fb.size, fbc->limit);

	return 0;

err_llb:
	if (drm_mm_node_allocated(&fbc->compressed_llb))
		i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
err:
	if (drm_mm_initialized(&i915->mm.stolen))
		drm_info_once(&i915->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}

static void intel_fbc_program_cfb(struct intel_fbc *fbc)
{
	fbc->funcs->program_cfb(fbc);
}

static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	if (WARN_ON(intel_fbc_hw_is_active(fbc)))
		return;

	if (drm_mm_node_allocated(&fbc->compressed_llb))
		i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
	if (drm_mm_node_allocated(&fbc->compressed_fb))
		i915_gem_stolen_remove_node(i915, &fbc->compressed_fb);
}

void intel_fbc_cleanup(struct drm_i915_private *i915)
{
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;

	for_each_intel_fbc(i915, fbc, fbc_id) {
		mutex_lock(&fbc->lock);
		__intel_fbc_cleanup_cfb(fbc);
		mutex_unlock(&fbc->lock);

		kfree(fbc);
	}
}

static bool stride_is_valid(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int stride = intel_fbc_plane_stride(plane_state) *
		fb->format->cpp[0];

	/* This should have been caught earlier. */
	if (drm_WARN_ON_ONCE(&i915->drm, (stride & (64 - 1)) != 0))
		return false;

	/* Below are the additional FBC restrictions. */
	if (stride < 512)
		return false;

	if (DISPLAY_VER(i915) == 2 || DISPLAY_VER(i915) == 3)
		return stride == 4096 || stride == 8192;

	if (DISPLAY_VER(i915) == 4 && !IS_G4X(i915) && stride < 2048)
		return false;

	/* Display WA #1105: skl,bxt,kbl,cfl,glk */
	if ((DISPLAY_VER(i915) == 9 || IS_GEMINILAKE(i915)) &&
	    fb->modifier == DRM_FORMAT_MOD_LINEAR && stride & 511)
		return false;

	if (stride > 16384)
		return false;

	return true;
}

static bool pixel_format_is_valid(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	switch (fb->format->format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
		return true;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_RGB565:
		/* 16bpp not supported on gen2 */
		if (DISPLAY_VER(i915) == 2)
			return false;
		/* WaFbcOnly1to1Ratio:ctg */
		if (IS_G4X(i915))
			return false;
		return true;
	default:
		return false;
	}
}

static bool rotation_is_valid(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;

	if (DISPLAY_VER(i915) >= 9 && fb->format->format == DRM_FORMAT_RGB565 &&
	    drm_rotation_90_or_270(rotation))
		return false;
	else if (DISPLAY_VER(i915) <= 4 && !IS_G4X(i915) &&
		 rotation != DRM_MODE_ROTATE_0)
		return false;

	return true;
}

/*
 * For some reason, the hardware tracking starts looking at whatever we
 * programmed as the display plane base address register. It does not look at
 * the X and Y offset registers. That's why we include the src x/y offsets
 * instead of just looking at the plane size.
 */
static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	unsigned int effective_w, effective_h, max_w, max_h;

	if (DISPLAY_VER(i915) >= 10) {
		max_w = 5120;
		max_h = 4096;
	} else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) {
		max_w = 4096;
		max_h = 4096;
	} else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) {
		max_w = 4096;
		max_h = 2048;
	} else {
		max_w = 2048;
		max_h = 1536;
	}

	effective_w = plane_state->view.color_plane[0].x +
		(drm_rect_width(&plane_state->uapi.src) >> 16);
	effective_h = plane_state->view.color_plane[0].y +
		(drm_rect_height(&plane_state->uapi.src) >> 16);

	return effective_w <= max_w && effective_h <= max_h;
}

static bool tiling_is_valid(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		return DISPLAY_VER(i915) >= 9;
	case I915_FORMAT_MOD_4_TILED:
	case I915_FORMAT_MOD_X_TILED:
		return true;
	default:
		return false;
	}
}

static void intel_fbc_update_state(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_plane *plane)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	struct intel_fbc *fbc = plane->fbc;
	struct intel_fbc_state *fbc_state = &fbc->state;

	WARN_ON(plane_state->no_fbc_reason);

	fbc_state->plane = plane;

	/* FBC1 compression interval: arbitrary choice of 1 second */
	fbc_state->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode);

	fbc_state->fence_y_offset = intel_plane_fence_y_offset(plane_state);

	drm_WARN_ON(&i915->drm, plane_state->flags & PLANE_HAS_FENCE &&
		    !plane_state->ggtt_vma->fence);

	if (plane_state->flags & PLANE_HAS_FENCE &&
	    plane_state->ggtt_vma->fence)
		fbc_state->fence_id = plane_state->ggtt_vma->fence->id;
	else
		fbc_state->fence_id = -1;

	fbc_state->cfb_stride = intel_fbc_cfb_stride(plane_state);
	fbc_state->cfb_size = intel_fbc_cfb_size(plane_state);
	fbc_state->override_cfb_stride = intel_fbc_override_cfb_stride(plane_state);
}

static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);

	/* The use of a CPU fence is one of two ways to detect writes by the
	 * CPU to the scanout and trigger updates to the FBC.
	 *
	 * The other method is by software tracking (see
	 * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke
	 * the current compressed buffer and recompress it.
	 *
	 * Note that is possible for a tiled surface to be unmappable (and
	 * so have no fence associated with it) due to aperture constraints
	 * at the time of pinning.
	 *
	 * FIXME with 90/270 degree rotation we should use the fence on
	 * the normal GTT view (the rotated view doesn't even have a
	 * fence). Would need changes to the FBC fence Y offset as well.
	 * For now this will effectively disable FBC with 90/270 degree
	 * rotation.
	 */
	return DISPLAY_VER(i915) >= 9 ||
		(plane_state->flags & PLANE_HAS_FENCE &&
		 plane_state->ggtt_vma->fence);
}

static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct intel_fbc *fbc = plane->fbc;

	return intel_fbc_min_limit(plane_state) <= fbc->limit &&
		intel_fbc_cfb_size(plane_state) <= fbc->compressed_fb.size * fbc->limit;
}

static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state)
{
	return !plane_state->no_fbc_reason &&
		intel_fbc_is_fence_ok(plane_state) &&
		intel_fbc_is_cfb_ok(plane_state);
}

static int intel_fbc_check_plane(struct intel_atomic_state *state,
				 struct intel_plane *plane)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc);
	const struct intel_crtc_state *crtc_state;
	struct intel_fbc *fbc = plane->fbc;

	if (!fbc)
		return 0;

	if (intel_vgpu_active(i915)) {
		plane_state->no_fbc_reason = "VGPU active";
		return 0;
	}

	if (!i915->params.enable_fbc) {
		plane_state->no_fbc_reason = "disabled per module param or by default";
		return 0;
	}

	if (!plane_state->uapi.visible) {
		plane_state->no_fbc_reason = "plane not visible";
		return 0;
	}

	crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		plane_state->no_fbc_reason = "interlaced mode not supported";
		return 0;
	}

	if (crtc_state->double_wide) {
		plane_state->no_fbc_reason = "double wide pipe not supported";
		return 0;
	}

	/*
	 * Display 12+ is not supporting FBC with PSR2.
	 * Recommendation is to keep this combination disabled
	 * Bspec: 50422 HSD: 14010260002
	 */
	if (DISPLAY_VER(i915) >= 12 && crtc_state->has_psr2) {
		plane_state->no_fbc_reason = "PSR2 enabled";
		return false;
	}

	if (!pixel_format_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "pixel format not supported";
		return 0;
	}

	if (!tiling_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "tiling not supported";
		return 0;
	}

	if (!rotation_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "rotation not supported";
		return 0;
	}

	if (!stride_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "stride not supported";
		return 0;
	}

	if (plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
	    fb->format->has_alpha) {
		plane_state->no_fbc_reason = "per-pixel alpha not supported";
		return false;
	}

	if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
		plane_state->no_fbc_reason = "plane size too big";
		return 0;
	}

	/*
	 * Work around a problem on GEN9+ HW, where enabling FBC on a plane
	 * having a Y offset that isn't divisible by 4 causes FIFO underrun
	 * and screen flicker.
	 */
	if (DISPLAY_VER(i915) >= 9 &&
	    plane_state->view.color_plane[0].y & 3) {
		plane_state->no_fbc_reason = "plane start Y offset misaligned";
		return false;
	}

	/* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
	if (DISPLAY_VER(i915) >= 11 &&
	    (plane_state->view.color_plane[0].y +
	     (drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) {
		plane_state->no_fbc_reason = "plane end Y offset misaligned";
		return false;
	}

	/* WaFbcExceedCdClockThreshold:hsw,bdw */
	if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		const struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		if (crtc_state->pixel_rate >= cdclk_state->logical.cdclk * 95 / 100) {
			plane_state->no_fbc_reason = "pixel rate too high";
			return 0;
		}
	}

	plane_state->no_fbc_reason = NULL;

	return 0;
}


static bool intel_fbc_can_flip_nuke(struct intel_atomic_state *state,
				    struct intel_crtc *crtc,
				    struct intel_plane *plane)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *old_plane_state =
		intel_atomic_get_old_plane_state(state, plane);
	const struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	const struct drm_framebuffer *old_fb = old_plane_state->hw.fb;
	const struct drm_framebuffer *new_fb = new_plane_state->hw.fb;

	if (drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi))
		return false;

	if (!intel_fbc_is_ok(old_plane_state) ||
	    !intel_fbc_is_ok(new_plane_state))
		return false;

	if (old_fb->format->format != new_fb->format->format)
		return false;

	if (old_fb->modifier != new_fb->modifier)
		return false;

	if (intel_fbc_plane_stride(old_plane_state) !=
	    intel_fbc_plane_stride(new_plane_state))
		return false;

	if (intel_fbc_cfb_stride(old_plane_state) !=
	    intel_fbc_cfb_stride(new_plane_state))
		return false;

	if (intel_fbc_cfb_size(old_plane_state) !=
	    intel_fbc_cfb_size(new_plane_state))
		return false;

	if (intel_fbc_override_cfb_stride(old_plane_state) !=
	    intel_fbc_override_cfb_stride(new_plane_state))
		return false;

	return true;
}

static bool __intel_fbc_pre_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_plane *plane)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_fbc *fbc = plane->fbc;
	bool need_vblank_wait = false;

	fbc->flip_pending = true;

	if (intel_fbc_can_flip_nuke(state, crtc, plane))
		return need_vblank_wait;

	intel_fbc_deactivate(fbc, "update pending");

	/*
	 * Display WA #1198: glk+
	 * Need an extra vblank wait between FBC disable and most plane
	 * updates. Bspec says this is only needed for plane disable, but
	 * that is not true. Touching most plane registers will cause the
	 * corruption to appear. Also SKL/derivatives do not seem to be
	 * affected.
	 *
	 * TODO: could optimize this a bit by sampling the frame
	 * counter when we disable FBC (if it was already done earlier)
	 * and skipping the extra vblank wait before the plane update
	 * if at least one frame has already passed.
	 */
	if (fbc->activated && DISPLAY_VER(i915) >= 10)
		need_vblank_wait = true;
	fbc->activated = false;

	return need_vblank_wait;
}

bool intel_fbc_pre_update(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	const struct intel_plane_state *plane_state;
	bool need_vblank_wait = false;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_fbc *fbc = plane->fbc;

		if (!fbc || plane->pipe != crtc->pipe)
			continue;

		mutex_lock(&fbc->lock);

		if (fbc->state.plane == plane)
			need_vblank_wait |= __intel_fbc_pre_update(state, crtc, plane);

		mutex_unlock(&fbc->lock);
	}

	return need_vblank_wait;
}

static void __intel_fbc_disable(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;
	struct intel_plane *plane = fbc->state.plane;

	drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock));
	drm_WARN_ON(&i915->drm, fbc->active);

	drm_dbg_kms(&i915->drm, "Disabling FBC on [PLANE:%d:%s]\n",
		    plane->base.base.id, plane->base.name);

	__intel_fbc_cleanup_cfb(fbc);

	fbc->state.plane = NULL;
	fbc->busy_bits = 0;
}

static void __intel_fbc_post_update(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock));

	if (!fbc->busy_bits)
		intel_fbc_activate(fbc);
	else
		intel_fbc_deactivate(fbc, "frontbuffer write");
}

void intel_fbc_post_update(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_fbc *fbc = plane->fbc;

		if (!fbc || plane->pipe != crtc->pipe)
			continue;

		mutex_lock(&fbc->lock);

		if (fbc->state.plane == plane) {
			fbc->flip_pending = false;
			__intel_fbc_post_update(fbc);
		}

		mutex_unlock(&fbc->lock);
	}
}

static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
{
	if (fbc->state.plane)
		return fbc->state.plane->frontbuffer_bit;
	else
		return 0;
}

static void __intel_fbc_invalidate(struct intel_fbc *fbc,
				   unsigned int frontbuffer_bits,
				   enum fb_op_origin origin)
{
	if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
		return;

	mutex_lock(&fbc->lock);

	frontbuffer_bits &= intel_fbc_get_frontbuffer_bit(fbc);
	if (!frontbuffer_bits)
		goto out;

	fbc->busy_bits |= frontbuffer_bits;
	intel_fbc_deactivate(fbc, "frontbuffer write");

out:
	mutex_unlock(&fbc->lock);
}

void intel_fbc_invalidate(struct drm_i915_private *i915,
			  unsigned int frontbuffer_bits,
			  enum fb_op_origin origin)
{
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;

	for_each_intel_fbc(i915, fbc, fbc_id)
		__intel_fbc_invalidate(fbc, frontbuffer_bits, origin);

}

static void __intel_fbc_flush(struct intel_fbc *fbc,
			      unsigned int frontbuffer_bits,
			      enum fb_op_origin origin)
{
	mutex_lock(&fbc->lock);

	frontbuffer_bits &= intel_fbc_get_frontbuffer_bit(fbc);
	if (!frontbuffer_bits)
		goto out;

	fbc->busy_bits &= ~frontbuffer_bits;

	if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
		goto out;

	if (fbc->busy_bits)
		goto out;

	if (fbc->active)
		intel_fbc_nuke(fbc);
	else if (!fbc->flip_pending)
		__intel_fbc_post_update(fbc);

out:
	mutex_unlock(&fbc->lock);
}

void intel_fbc_flush(struct drm_i915_private *i915,
		     unsigned int frontbuffer_bits,
		     enum fb_op_origin origin)
{
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;

	for_each_intel_fbc(i915, fbc, fbc_id)
		__intel_fbc_flush(fbc, frontbuffer_bits, origin);
}

int intel_fbc_atomic_check(struct intel_atomic_state *state)
{
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		int ret;

		ret = intel_fbc_check_plane(state, plane);
		if (ret)
			return ret;
	}

	return 0;
}

static void __intel_fbc_enable(struct intel_atomic_state *state,
			       struct intel_crtc *crtc,
			       struct intel_plane *plane)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	struct intel_fbc *fbc = plane->fbc;

	if (fbc->state.plane) {
		if (fbc->state.plane != plane)
			return;

		if (intel_fbc_is_ok(plane_state)) {
			intel_fbc_update_state(state, crtc, plane);
			return;
		}

		__intel_fbc_disable(fbc);
	}

	drm_WARN_ON(&i915->drm, fbc->active);

	fbc->no_fbc_reason = plane_state->no_fbc_reason;
	if (fbc->no_fbc_reason)
		return;

	if (!intel_fbc_is_fence_ok(plane_state)) {
		fbc->no_fbc_reason = "framebuffer not fenced";
		return;
	}

	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "FIFO underrun";
		return;
	}

	if (intel_fbc_alloc_cfb(fbc, intel_fbc_cfb_size(plane_state),
				intel_fbc_min_limit(plane_state))) {
		fbc->no_fbc_reason = "not enough stolen memory";
		return;
	}

	drm_dbg_kms(&i915->drm, "Enabling FBC on [PLANE:%d:%s]\n",
		    plane->base.base.id, plane->base.name);
	fbc->no_fbc_reason = "FBC enabled but not active yet\n";

	intel_fbc_update_state(state, crtc, plane);

	intel_fbc_program_cfb(fbc);
}

/**
 * intel_fbc_disable - disable FBC if it's associated with crtc
 * @crtc: the CRTC
 *
 * This function disables FBC if it's associated with the provided CRTC.
 */
void intel_fbc_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane(&i915->drm, plane) {
		struct intel_fbc *fbc = plane->fbc;

		if (!fbc || plane->pipe != crtc->pipe)
			continue;

		mutex_lock(&fbc->lock);
		if (fbc->state.plane == plane)
			__intel_fbc_disable(fbc);
		mutex_unlock(&fbc->lock);
	}
}

void intel_fbc_update(struct intel_atomic_state *state,
		      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_fbc *fbc = plane->fbc;

		if (!fbc || plane->pipe != crtc->pipe)
			continue;

		mutex_lock(&fbc->lock);

		if (crtc_state->update_pipe && plane_state->no_fbc_reason) {
			if (fbc->state.plane == plane)
				__intel_fbc_disable(fbc);
		} else {
			__intel_fbc_enable(state, crtc, plane);
		}

		mutex_unlock(&fbc->lock);
	}
}

/**
 * intel_fbc_global_disable - globally disable FBC
 * @i915: i915 device instance
 *
 * This function disables FBC regardless of which CRTC is associated with it.
 */
void intel_fbc_global_disable(struct drm_i915_private *i915)
{
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;

	for_each_intel_fbc(i915, fbc, fbc_id) {
		mutex_lock(&fbc->lock);
		if (fbc->state.plane)
			__intel_fbc_disable(fbc);
		mutex_unlock(&fbc->lock);
	}
}

static void intel_fbc_underrun_work_fn(struct work_struct *work)
{
	struct intel_fbc *fbc = container_of(work, typeof(*fbc), underrun_work);
	struct drm_i915_private *i915 = fbc->i915;

	mutex_lock(&fbc->lock);

	/* Maybe we were scheduled twice. */
	if (fbc->underrun_detected || !fbc->state.plane)
		goto out;

	drm_dbg_kms(&i915->drm, "Disabling FBC due to FIFO underrun.\n");
	fbc->underrun_detected = true;

	intel_fbc_deactivate(fbc, "FIFO underrun");
	if (!fbc->flip_pending)
		intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(i915, fbc->state.plane->pipe));
	__intel_fbc_disable(fbc);
out:
	mutex_unlock(&fbc->lock);
}

static void __intel_fbc_reset_underrun(struct intel_fbc *fbc)
{
	struct drm_i915_private *i915 = fbc->i915;

	cancel_work_sync(&fbc->underrun_work);

	mutex_lock(&fbc->lock);

	if (fbc->underrun_detected) {
		drm_dbg_kms(&i915->drm,
			    "Re-allowing FBC after fifo underrun\n");
		fbc->no_fbc_reason = "FIFO underrun cleared";
	}

	fbc->underrun_detected = false;
	mutex_unlock(&fbc->lock);
}

/*
 * intel_fbc_reset_underrun - reset FBC fifo underrun status.
 * @i915: the i915 device
 *
 * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
 * want to re-enable FBC after an underrun to increase test coverage.
 */
void intel_fbc_reset_underrun(struct drm_i915_private *i915)
{
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;

	for_each_intel_fbc(i915, fbc, fbc_id)
		__intel_fbc_reset_underrun(fbc);
}

static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
{
	/*
	 * There's no guarantee that underrun_detected won't be set to true
	 * right after this check and before the work is scheduled, but that's
	 * not a problem since we'll check it again under the work function
	 * while FBC is locked. This check here is just to prevent us from
	 * unnecessarily scheduling the work, and it relies on the fact that we
	 * never switch underrun_detect back to false after it's true.
	 */
	if (READ_ONCE(fbc->underrun_detected))
		return;

	schedule_work(&fbc->underrun_work);
}

/**
 * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
 * @i915: i915 device
 *
 * Without FBC, most underruns are harmless and don't really cause too many
 * problems, except for an annoying message on dmesg. With FBC, underruns can
 * become black screens or even worse, especially when paired with bad
 * watermarks. So in order for us to be on the safe side, completely disable FBC
 * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
 * already suggests that watermarks may be bad, so try to be as safe as
 * possible.
 *
 * This function is called from the IRQ handler.
 */
void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915)
{
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;

	for_each_intel_fbc(i915, fbc, fbc_id)
		__intel_fbc_handle_fifo_underrun_irq(fbc);
}

/*
 * The DDX driver changes its behavior depending on the value it reads from
 * i915.enable_fbc, so sanitize it by translating the default value into either
 * 0 or 1 in order to allow it to know what's going on.
 *
 * Notice that this is done at driver initialization and we still allow user
 * space to change the value during runtime without sanitizing it again. IGT
 * relies on being able to change i915.enable_fbc at runtime.
 */
static int intel_sanitize_fbc_option(struct drm_i915_private *i915)
{
	if (i915->params.enable_fbc >= 0)
		return !!i915->params.enable_fbc;

	if (!HAS_FBC(i915))
		return 0;

	if (IS_BROADWELL(i915) || DISPLAY_VER(i915) >= 9)
		return 1;

	return 0;
}

static bool need_fbc_vtd_wa(struct drm_i915_private *i915)
{
	/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
	if (i915_vtd_active(i915) &&
	    (IS_SKYLAKE(i915) || IS_BROXTON(i915))) {
		drm_info(&i915->drm,
			 "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
		return true;
	}

	return false;
}

void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane)
{
	plane->fbc = fbc;
}

static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915,
					  enum intel_fbc_id fbc_id)
{
	struct intel_fbc *fbc;

	fbc = kzalloc(sizeof(*fbc), GFP_KERNEL);
	if (!fbc)
		return NULL;

	fbc->id = fbc_id;
	fbc->i915 = i915;
	INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
	mutex_init(&fbc->lock);

	if (DISPLAY_VER(i915) >= 7)
		fbc->funcs = &ivb_fbc_funcs;
	else if (DISPLAY_VER(i915) == 6)
		fbc->funcs = &snb_fbc_funcs;
	else if (DISPLAY_VER(i915) == 5)
		fbc->funcs = &ilk_fbc_funcs;
	else if (IS_G4X(i915))
		fbc->funcs = &g4x_fbc_funcs;
	else if (DISPLAY_VER(i915) == 4)
		fbc->funcs = &i965_fbc_funcs;
	else
		fbc->funcs = &i8xx_fbc_funcs;

	return fbc;
}

/**
 * intel_fbc_init - Initialize FBC
 * @i915: the i915 device
 *
 * This function might be called during PM init process.
 */
void intel_fbc_init(struct drm_i915_private *i915)
{
	enum intel_fbc_id fbc_id;

	if (!drm_mm_initialized(&i915->mm.stolen))
		mkwrite_device_info(i915)->display.fbc_mask = 0;

	if (need_fbc_vtd_wa(i915))
		mkwrite_device_info(i915)->display.fbc_mask = 0;

	i915->params.enable_fbc = intel_sanitize_fbc_option(i915);
	drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n",
		    i915->params.enable_fbc);

	for_each_fbc_id(i915, fbc_id) {
		struct intel_fbc *fbc;

		fbc = intel_fbc_create(i915, fbc_id);
		if (!fbc)
			continue;

		/*
		 * We still don't have any sort of hardware state readout
		 * for FBC, so deactivate it in case the BIOS activated it
		 * to make sure software matches the hardware state.
		 */
		if (intel_fbc_hw_is_active(fbc))
			intel_fbc_hw_deactivate(fbc);

		i915->fbc[fbc->id] = fbc;
	}
}

static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
{
	struct intel_fbc *fbc = m->private;
	struct drm_i915_private *i915 = fbc->i915;
	struct intel_plane *plane;
	intel_wakeref_t wakeref;

	drm_modeset_lock_all(&i915->drm);

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	mutex_lock(&fbc->lock);

	if (fbc->active) {
		seq_puts(m, "FBC enabled\n");
		seq_printf(m, "Compressing: %s\n",
			   str_yes_no(intel_fbc_is_compressing(fbc)));
	} else {
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
	}
	for_each_intel_plane(&i915->drm, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane->fbc != fbc)
			continue;

		seq_printf(m, "%c [PLANE:%d:%s]: %s\n",
			   fbc->state.plane == plane ? '*' : ' ',
			   plane->base.base.id, plane->base.name,
			   plane_state->no_fbc_reason ?: "FBC possible");
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	drm_modeset_unlock_all(&i915->drm);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(intel_fbc_debugfs_status);

static int intel_fbc_debugfs_false_color_get(void *data, u64 *val)
{
	struct intel_fbc *fbc = data;

	*val = fbc->false_color;

	return 0;
}

static int intel_fbc_debugfs_false_color_set(void *data, u64 val)
{
	struct intel_fbc *fbc = data;

	mutex_lock(&fbc->lock);

	fbc->false_color = val;

	if (fbc->active)
		fbc->funcs->set_false_color(fbc, fbc->false_color);

	mutex_unlock(&fbc->lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(intel_fbc_debugfs_false_color_fops,
			intel_fbc_debugfs_false_color_get,
			intel_fbc_debugfs_false_color_set,
			"%llu\n");

static void intel_fbc_debugfs_add(struct intel_fbc *fbc,
				  struct dentry *parent)
{
	debugfs_create_file("i915_fbc_status", 0444, parent,
			    fbc, &intel_fbc_debugfs_status_fops);

	if (fbc->funcs->set_false_color)
		debugfs_create_file("i915_fbc_false_color", 0644, parent,
				    fbc, &intel_fbc_debugfs_false_color_fops);
}

void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc)
{
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);

	if (plane->fbc)
		intel_fbc_debugfs_add(plane->fbc, crtc->base.debugfs_entry);
}

/* FIXME: remove this once igt is on board with per-crtc stuff */
void intel_fbc_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;
	struct intel_fbc *fbc;

	fbc = i915->fbc[INTEL_FBC_A];
	if (fbc)
		intel_fbc_debugfs_add(fbc, minor->debugfs_root);
}