/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Frame Buffer Compression (FBC)
 *
 * FBC tries to save memory bandwidth (and so power consumption) by
 * compressing the amount of memory used by the display. It is totally
 * transparent to user space and completely handled in the kernel.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns. They come from keeping the memory footprint small
 * and having fewer memory pages opened and accessed for refreshing the display.
 *
 * i915 is responsible for reserving stolen memory for FBC and configuring
 * its offset in the proper registers. The hardware takes care of all the
 * compression/decompression. However, there are many known cases where we
 * have to forcibly disable it to allow proper screen updates.
 */

#include <drm/drm_fourcc.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"

#define for_each_fbc_id(__dev_priv, __fbc_id) \
	for ((__fbc_id) = INTEL_FBC_A; (__fbc_id) < I915_MAX_FBCS; (__fbc_id)++) \
		for_each_if(INTEL_INFO(__dev_priv)->display.fbc_mask & BIT(__fbc_id))

#define for_each_intel_fbc(__dev_priv, __fbc, __fbc_id) \
	for_each_fbc_id((__dev_priv), (__fbc_id)) \
		for_each_if((__fbc) = (__dev_priv)->fbc[(__fbc_id)])

struct intel_fbc_funcs {
	void (*activate)(struct intel_fbc *fbc);
	void (*deactivate)(struct intel_fbc *fbc);
	bool (*is_active)(struct intel_fbc *fbc);
	bool (*is_compressing)(struct intel_fbc *fbc);
	void (*nuke)(struct intel_fbc *fbc);
	void (*program_cfb)(struct intel_fbc *fbc);
	void (*set_false_color)(struct intel_fbc *fbc, bool enable);
};

struct intel_fbc_state {
	struct intel_plane *plane;
	unsigned int cfb_stride;
	unsigned int cfb_size;
	unsigned int fence_y_offset;
	u16 override_cfb_stride;
	u16 interval;
	s8 fence_id;
};

struct intel_fbc {
	struct drm_i915_private *i915;
	const struct intel_fbc_funcs *funcs;

	/*
	 * This is always the inner lock when overlapping with
	 * struct_mutex and it's the outer lock when overlapping
	 * with stolen_lock.
	 */
	struct mutex lock;
	unsigned int possible_framebuffer_bits;
	unsigned int busy_bits;

	struct drm_mm_node compressed_fb;
	struct drm_mm_node compressed_llb;

	enum intel_fbc_id id;

	u8 limit;

	bool false_color;

	bool active;
	bool activated;
	bool flip_pending;

	bool underrun_detected;
	struct work_struct underrun_work;

	/*
	 * This structure contains everything that's relevant to program the
	 * hardware registers. When we want to figure out if we need to disable
	 * and re-enable FBC for a new configuration we just check if there's
	 * something different in the struct. The genx_fbc_activate functions
	 * are supposed to read from it in order to program the registers.
	 */
	struct intel_fbc_state state;
	const char *no_fbc_reason;
};

120 121
/* plane stride in pixels */
static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane_state)
122
{
123 124 125
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int stride;

126
	stride = plane_state->view.color_plane[0].mapping_stride;
127 128 129 130 131 132 133
	if (!drm_rotation_90_or_270(plane_state->hw.rotation))
		stride /= fb->format->cpp[0];

	return stride;
}

/* plane stride based cfb stride in bytes, assuming 1:1 compression limit */
134
static unsigned int _intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
135 136 137
{
	unsigned int cpp = 4; /* FBC always 4 bytes per pixel */

138
	return intel_fbc_plane_stride(plane_state) * cpp;
139 140 141
}

/* minimum acceptable cfb stride in bytes, assuming 1:1 compression limit */
142
static unsigned int skl_fbc_min_cfb_stride(const struct intel_plane_state *plane_state)
143
{
144
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
145 146
	unsigned int limit = 4; /* 1:4 compression limit is the worst case */
	unsigned int cpp = 4; /* FBC always 4 bytes per pixel */
147
	unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16;
148 149 150 151
	unsigned int height = 4; /* FBC segment is 4 lines */
	unsigned int stride;

	/* minimum segment stride we can use */
152
	stride = width * cpp * height / limit;
153

154 155 156 157 158 159 160
	/*
	 * Wa_16011863758: icl+
	 * Avoid some hardware segment address miscalculation.
	 */
	if (DISPLAY_VER(i915) >= 11)
		stride += 64;

161 162 163 164 165 166 167 168 169 170 171
	/*
	 * At least some of the platforms require each 4 line segment to
	 * be 512 byte aligned. Just do it always for simplicity.
	 */
	stride = ALIGN(stride, 512);

	/* convert back to single line equivalent with 1:1 compression limit */
	return stride * limit / height;
}

/* properly aligned cfb stride in bytes, assuming 1:1 compression limit */
172
static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
173
{
174 175
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	unsigned int stride = _intel_fbc_cfb_stride(plane_state);
176 177 178 179 180 181

	/*
	 * At least some of the platforms require each 4 line segment to
	 * be 512 byte aligned. Aligning each line to 512 bytes guarantees
	 * that regardless of the compression limit we choose later.
	 */
182
	if (DISPLAY_VER(i915) >= 9)
183
		return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(plane_state));
184 185 186 187
	else
		return stride;
}

188
static unsigned int intel_fbc_cfb_size(const struct intel_plane_state *plane_state)
189
{
190 191
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	int lines = drm_rect_height(&plane_state->uapi.src) >> 16;
192

V
Ville Syrjälä 已提交
193
	if (DISPLAY_VER(i915) == 7)
194
		lines = min(lines, 2048);
V
Ville Syrjälä 已提交
195
	else if (DISPLAY_VER(i915) >= 8)
196
		lines = min(lines, 2560);
197

198
	return lines * intel_fbc_cfb_stride(plane_state);
199 200
}

201
static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_state)
202
{
203 204 205 206
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	unsigned int stride_aligned = intel_fbc_cfb_stride(plane_state);
	unsigned int stride = _intel_fbc_cfb_stride(plane_state);
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	/*
	 * Override stride in 64 byte units per 4 line segment.
	 *
	 * Gen9 hw miscalculates cfb stride for linear as
	 * PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so
	 * we always need to use the override there.
	 */
	if (stride != stride_aligned ||
216
	    (DISPLAY_VER(i915) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR))
217 218 219 220 221
		return stride_aligned * 4 / 64;

	return 0;
}

222
static u32 i8xx_fbc_ctl(struct intel_fbc *fbc)
223
{
V
Ville Syrjälä 已提交
224
	const struct intel_fbc_state *fbc_state = &fbc->state;
225
	struct drm_i915_private *i915 = fbc->i915;
226 227 228
	unsigned int cfb_stride;
	u32 fbc_ctl;

V
Ville Syrjälä 已提交
229
	cfb_stride = fbc_state->cfb_stride / fbc->limit;
230 231 232 233 234 235 236 237

	/* FBC_CTL wants 32B or 64B units */
	if (DISPLAY_VER(i915) == 2)
		cfb_stride = (cfb_stride / 32) - 1;
	else
		cfb_stride = (cfb_stride / 64) - 1;

	fbc_ctl = FBC_CTL_PERIODIC |
V
Ville Syrjälä 已提交
238
		FBC_CTL_INTERVAL(fbc_state->interval) |
239 240 241 242 243
		FBC_CTL_STRIDE(cfb_stride);

	if (IS_I945GM(i915))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */

V
Ville Syrjälä 已提交
244 245
	if (fbc_state->fence_id >= 0)
		fbc_ctl |= FBC_CTL_FENCENO(fbc_state->fence_id);
246 247 248 249

	return fbc_ctl;
}

250
static u32 i965_fbc_ctl2(struct intel_fbc *fbc)
251
{
V
Ville Syrjälä 已提交
252
	const struct intel_fbc_state *fbc_state = &fbc->state;
253 254 255
	u32 fbc_ctl2;

	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM |
V
Ville Syrjälä 已提交
256
		FBC_CTL_PLANE(fbc_state->plane->i9xx_plane);
257

V
Ville Syrjälä 已提交
258
	if (fbc_state->fence_id >= 0)
259
		fbc_ctl2 |= FBC_CTL_CPU_FENCE_EN;
260 261 262 263

	return fbc_ctl2;
}

264
static void i8xx_fbc_deactivate(struct intel_fbc *fbc)
265
{
266
	struct drm_i915_private *i915 = fbc->i915;
267 268 269
	u32 fbc_ctl;

	/* Disable compression */
V
Ville Syrjälä 已提交
270
	fbc_ctl = intel_de_read(i915, FBC_CONTROL);
271 272 273 274
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
V
Ville Syrjälä 已提交
275
	intel_de_write(i915, FBC_CONTROL, fbc_ctl);
276 277

	/* Wait for compressing bit to clear */
V
Ville Syrjälä 已提交
278
	if (intel_de_wait_for_clear(i915, FBC_STATUS,
279
				    FBC_STAT_COMPRESSING, 10)) {
V
Ville Syrjälä 已提交
280
		drm_dbg_kms(&i915->drm, "FBC idle timed out\n");
281 282 283 284
		return;
	}
}

285
static void i8xx_fbc_activate(struct intel_fbc *fbc)
286
{
V
Ville Syrjälä 已提交
287
	const struct intel_fbc_state *fbc_state = &fbc->state;
288
	struct drm_i915_private *i915 = fbc->i915;
289 290 291 292
	int i;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
V
Ville Syrjälä 已提交
293
		intel_de_write(i915, FBC_TAG(i), 0);
294

V
Ville Syrjälä 已提交
295 296
	if (DISPLAY_VER(i915) == 4) {
		intel_de_write(i915, FBC_CONTROL2,
297
			       i965_fbc_ctl2(fbc));
V
Ville Syrjälä 已提交
298
		intel_de_write(i915, FBC_FENCE_OFF,
V
Ville Syrjälä 已提交
299
			       fbc_state->fence_y_offset);
300 301
	}

V
Ville Syrjälä 已提交
302
	intel_de_write(i915, FBC_CONTROL,
303
		       FBC_CTL_EN | i8xx_fbc_ctl(fbc));
304 305
}

306
static bool i8xx_fbc_is_active(struct intel_fbc *fbc)
307
{
308
	return intel_de_read(fbc->i915, FBC_CONTROL) & FBC_CTL_EN;
309 310
}

311
static bool i8xx_fbc_is_compressing(struct intel_fbc *fbc)
312
{
313
	return intel_de_read(fbc->i915, FBC_STATUS) &
314 315 316
		(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
}

317
static void i8xx_fbc_nuke(struct intel_fbc *fbc)
318
{
V
Ville Syrjälä 已提交
319 320
	struct intel_fbc_state *fbc_state = &fbc->state;
	enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
321
	struct drm_i915_private *dev_priv = fbc->i915;
322 323 324 325 326 327 328

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
			  intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane)));
	spin_unlock_irq(&dev_priv->uncore.lock);
}

329
static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
330
{
331
	struct drm_i915_private *i915 = fbc->i915;

	GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start,
					 fbc->compressed_fb.start, U32_MAX));
	GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start,
					 fbc->compressed_llb.start, U32_MAX));

	intel_de_write(i915, FBC_CFB_BASE,
		       i915->dsm.start + fbc->compressed_fb.start);
	intel_de_write(i915, FBC_LL_BASE,
		       i915->dsm.start + fbc->compressed_llb.start);
}

344 345 346 347 348
static const struct intel_fbc_funcs i8xx_fbc_funcs = {
	.activate = i8xx_fbc_activate,
	.deactivate = i8xx_fbc_deactivate,
	.is_active = i8xx_fbc_is_active,
	.is_compressing = i8xx_fbc_is_compressing,
349
	.nuke = i8xx_fbc_nuke,
350
	.program_cfb = i8xx_fbc_program_cfb,
351 352
};

353
static void i965_fbc_nuke(struct intel_fbc *fbc)
354
{
V
Ville Syrjälä 已提交
355 356
	struct intel_fbc_state *fbc_state = &fbc->state;
	enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
357
	struct drm_i915_private *dev_priv = fbc->i915;

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
			  intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane)));
	spin_unlock_irq(&dev_priv->uncore.lock);
}

static const struct intel_fbc_funcs i965_fbc_funcs = {
	.activate = i8xx_fbc_activate,
	.deactivate = i8xx_fbc_deactivate,
	.is_active = i8xx_fbc_is_active,
	.is_compressing = i8xx_fbc_is_compressing,
	.nuke = i965_fbc_nuke,
371
	.program_cfb = i8xx_fbc_program_cfb,
372 373
};

374
static u32 g4x_dpfc_ctl_limit(struct intel_fbc *fbc)
375
{
376
	switch (fbc->limit) {
377
	default:
378
		MISSING_CASE(fbc->limit);
		fallthrough;
	case 1:
		return DPFC_CTL_LIMIT_1X;
	case 2:
		return DPFC_CTL_LIMIT_2X;
	case 4:
		return DPFC_CTL_LIMIT_4X;
	}
}

389
static u32 g4x_dpfc_ctl(struct intel_fbc *fbc)
390
{
V
Ville Syrjälä 已提交
391
	const struct intel_fbc_state *fbc_state = &fbc->state;
392
	struct drm_i915_private *i915 = fbc->i915;
393 394
	u32 dpfc_ctl;

395
	dpfc_ctl = g4x_dpfc_ctl_limit(fbc) |
V
Ville Syrjälä 已提交
396
		DPFC_CTL_PLANE_G4X(fbc_state->plane->i9xx_plane);
397

398
	if (IS_G4X(i915))
399
		dpfc_ctl |= DPFC_CTL_SR_EN;
400

V
Ville Syrjälä 已提交
401
	if (fbc_state->fence_id >= 0) {
402
		dpfc_ctl |= DPFC_CTL_FENCE_EN_G4X;
403 404

		if (DISPLAY_VER(i915) < 6)
V
Ville Syrjälä 已提交
405
			dpfc_ctl |= DPFC_CTL_FENCENO(fbc_state->fence_id);
406 407 408 409 410
	}

	return dpfc_ctl;
}

411
static void g4x_fbc_activate(struct intel_fbc *fbc)
412
{
V
Ville Syrjälä 已提交
413
	const struct intel_fbc_state *fbc_state = &fbc->state;
414
	struct drm_i915_private *i915 = fbc->i915;
415

V
Ville Syrjälä 已提交
416
	intel_de_write(i915, DPFC_FENCE_YOFF,
V
Ville Syrjälä 已提交
417
		       fbc_state->fence_y_offset);
418

V
Ville Syrjälä 已提交
419
	intel_de_write(i915, DPFC_CONTROL,
420
		       DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
421 422
}

423
static void g4x_fbc_deactivate(struct intel_fbc *fbc)
424
{
425
	struct drm_i915_private *i915 = fbc->i915;
426 427 428
	u32 dpfc_ctl;

	/* Disable compression */
V
Ville Syrjälä 已提交
429
	dpfc_ctl = intel_de_read(i915, DPFC_CONTROL);
430 431
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
V
Ville Syrjälä 已提交
432
		intel_de_write(i915, DPFC_CONTROL, dpfc_ctl);
433 434 435
	}
}

436
static bool g4x_fbc_is_active(struct intel_fbc *fbc)
437
{
438
	return intel_de_read(fbc->i915, DPFC_CONTROL) & DPFC_CTL_EN;
439 440
}

441
static bool g4x_fbc_is_compressing(struct intel_fbc *fbc)
442
{
443
	return intel_de_read(fbc->i915, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
444 445
}

446
static void g4x_fbc_program_cfb(struct intel_fbc *fbc)
447
{
448
	struct drm_i915_private *i915 = fbc->i915;
449 450 451 452

	intel_de_write(i915, DPFC_CB_BASE, fbc->compressed_fb.start);
}

453 454 455 456 457
static const struct intel_fbc_funcs g4x_fbc_funcs = {
	.activate = g4x_fbc_activate,
	.deactivate = g4x_fbc_deactivate,
	.is_active = g4x_fbc_is_active,
	.is_compressing = g4x_fbc_is_compressing,
458
	.nuke = i965_fbc_nuke,
459
	.program_cfb = g4x_fbc_program_cfb,
460 461
};

462
static void ilk_fbc_activate(struct intel_fbc *fbc)
463
{
V
Ville Syrjälä 已提交
464
	struct intel_fbc_state *fbc_state = &fbc->state;
465
	struct drm_i915_private *i915 = fbc->i915;
466

467
	intel_de_write(i915, ILK_DPFC_FENCE_YOFF(fbc->id),
V
Ville Syrjälä 已提交
468
		       fbc_state->fence_y_offset);
469

470
	intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
471
		       DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
472 473
}

474
static void ilk_fbc_deactivate(struct intel_fbc *fbc)
475
{
476
	struct drm_i915_private *i915 = fbc->i915;
477 478 479
	u32 dpfc_ctl;

	/* Disable compression */
480
	dpfc_ctl = intel_de_read(i915, ILK_DPFC_CONTROL(fbc->id));
481 482
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
483
		intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
484 485 486
	}
}

487
static bool ilk_fbc_is_active(struct intel_fbc *fbc)
488
{
489
	return intel_de_read(fbc->i915, ILK_DPFC_CONTROL(fbc->id)) & DPFC_CTL_EN;
490 491
}

492
static bool ilk_fbc_is_compressing(struct intel_fbc *fbc)
493
{
494
	return intel_de_read(fbc->i915, ILK_DPFC_STATUS(fbc->id)) & DPFC_COMP_SEG_MASK;
495 496
}

497
static void ilk_fbc_program_cfb(struct intel_fbc *fbc)
498
{
499
	struct drm_i915_private *i915 = fbc->i915;
500

501
	intel_de_write(i915, ILK_DPFC_CB_BASE(fbc->id), fbc->compressed_fb.start);
502 503
}

504 505 506 507 508
static const struct intel_fbc_funcs ilk_fbc_funcs = {
	.activate = ilk_fbc_activate,
	.deactivate = ilk_fbc_deactivate,
	.is_active = ilk_fbc_is_active,
	.is_compressing = ilk_fbc_is_compressing,
509
	.nuke = i965_fbc_nuke,
510
	.program_cfb = ilk_fbc_program_cfb,
511 512
};

513
static void snb_fbc_program_fence(struct intel_fbc *fbc)
514
{
V
Ville Syrjälä 已提交
515
	const struct intel_fbc_state *fbc_state = &fbc->state;
516
	struct drm_i915_private *i915 = fbc->i915;
517 518
	u32 ctl = 0;

V
Ville Syrjälä 已提交
519 520
	if (fbc_state->fence_id >= 0)
		ctl = SNB_DPFC_FENCE_EN | SNB_DPFC_FENCENO(fbc_state->fence_id);
521 522

	intel_de_write(i915, SNB_DPFC_CTL_SA, ctl);
V
Ville Syrjälä 已提交
523
	intel_de_write(i915, SNB_DPFC_CPU_FENCE_OFFSET, fbc_state->fence_y_offset);
524 525
}

526
static void snb_fbc_activate(struct intel_fbc *fbc)
527
{
528
	snb_fbc_program_fence(fbc);
529

530
	ilk_fbc_activate(fbc);
531 532
}

533
static void snb_fbc_nuke(struct intel_fbc *fbc)
534
{
535 536
	struct drm_i915_private *i915 = fbc->i915;

537 538
	intel_de_write(i915, MSG_FBC_REND_STATE(fbc->id), FBC_REND_NUKE);
	intel_de_posting_read(i915, MSG_FBC_REND_STATE(fbc->id));
539 540 541 542 543 544 545 546
}

static const struct intel_fbc_funcs snb_fbc_funcs = {
	.activate = snb_fbc_activate,
	.deactivate = ilk_fbc_deactivate,
	.is_active = ilk_fbc_is_active,
	.is_compressing = ilk_fbc_is_compressing,
	.nuke = snb_fbc_nuke,
547
	.program_cfb = ilk_fbc_program_cfb,
548 549
};

550
static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc)
551
{
V
Ville Syrjälä 已提交
552
	const struct intel_fbc_state *fbc_state = &fbc->state;
553
	struct drm_i915_private *i915 = fbc->i915;
554
	u32 val = 0;
555

V
Ville Syrjälä 已提交
556
	if (fbc_state->override_cfb_stride)
557
		val |= FBC_STRIDE_OVERRIDE |
V
Ville Syrjälä 已提交
558
			FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);
559

560
	intel_de_write(i915, GLK_FBC_STRIDE(fbc->id), val);
561
}
562

563
static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc)
564
{
V
Ville Syrjälä 已提交
565
	const struct intel_fbc_state *fbc_state = &fbc->state;
566
	struct drm_i915_private *i915 = fbc->i915;
567
	u32 val = 0;
568

569
	/* Display WA #0529: skl, kbl, bxt. */
V
Ville Syrjälä 已提交
570
	if (fbc_state->override_cfb_stride)
571
		val |= CHICKEN_FBC_STRIDE_OVERRIDE |
V
Ville Syrjälä 已提交
572
			CHICKEN_FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit);
573

574 575 576 577 578
	intel_de_rmw(i915, CHICKEN_MISC_4,
		     CHICKEN_FBC_STRIDE_OVERRIDE |
		     CHICKEN_FBC_STRIDE_MASK, val);
}

579
static u32 ivb_dpfc_ctl(struct intel_fbc *fbc)
580
{
V
Ville Syrjälä 已提交
581
	const struct intel_fbc_state *fbc_state = &fbc->state;
582
	struct drm_i915_private *i915 = fbc->i915;
583 584
	u32 dpfc_ctl;

585
	dpfc_ctl = g4x_dpfc_ctl_limit(fbc);
586

587
	if (IS_IVYBRIDGE(i915))
V
Ville Syrjälä 已提交
588
		dpfc_ctl |= DPFC_CTL_PLANE_IVB(fbc_state->plane->i9xx_plane);
589

V
Ville Syrjälä 已提交
590
	if (fbc_state->fence_id >= 0)
591
		dpfc_ctl |= DPFC_CTL_FENCE_EN_IVB;
592

593
	if (fbc->false_color)
594
		dpfc_ctl |= DPFC_CTL_FALSE_COLOR;
595

596 597 598
	return dpfc_ctl;
}

599
static void ivb_fbc_activate(struct intel_fbc *fbc)
600
{
601 602
	struct drm_i915_private *i915 = fbc->i915;

V
Ville Syrjälä 已提交
603
	if (DISPLAY_VER(i915) >= 10)
604
		glk_fbc_program_cfb_stride(fbc);
V
Ville Syrjälä 已提交
605
	else if (DISPLAY_VER(i915) == 9)
606
		skl_fbc_program_cfb_stride(fbc);
607

608
	if (to_gt(i915)->ggtt->num_fences)
609
		snb_fbc_program_fence(fbc);
610

611
	intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
612
		       DPFC_CTL_EN | ivb_dpfc_ctl(fbc));
613 614
}

615
static bool ivb_fbc_is_compressing(struct intel_fbc *fbc)
616
{
617
	return intel_de_read(fbc->i915, ILK_DPFC_STATUS2(fbc->id)) & DPFC_COMP_SEG_MASK_IVB;
618 619
}

620
static void ivb_fbc_set_false_color(struct intel_fbc *fbc,
621 622
				    bool enable)
{
623
	intel_de_rmw(fbc->i915, ILK_DPFC_CONTROL(fbc->id),
624
		     DPFC_CTL_FALSE_COLOR, enable ? DPFC_CTL_FALSE_COLOR : 0);
625 626
}

V
Ville Syrjälä 已提交
627 628
static const struct intel_fbc_funcs ivb_fbc_funcs = {
	.activate = ivb_fbc_activate,
629 630
	.deactivate = ilk_fbc_deactivate,
	.is_active = ilk_fbc_is_active,
V
Ville Syrjälä 已提交
631
	.is_compressing = ivb_fbc_is_compressing,
632
	.nuke = snb_fbc_nuke,
633
	.program_cfb = ilk_fbc_program_cfb,
634
	.set_false_color = ivb_fbc_set_false_color,
635 636
};

637
static bool intel_fbc_hw_is_active(struct intel_fbc *fbc)
638
{
639
	return fbc->funcs->is_active(fbc);
640 641
}

642
static void intel_fbc_hw_activate(struct intel_fbc *fbc)
643
{
V
Ville Syrjälä 已提交
644
	trace_intel_fbc_activate(fbc->state.plane);
645

646
	fbc->active = true;
647
	fbc->activated = true;
648

649
	fbc->funcs->activate(fbc);
650 651
}

652
static void intel_fbc_hw_deactivate(struct intel_fbc *fbc)
653
{
V
Ville Syrjälä 已提交
654
	trace_intel_fbc_deactivate(fbc->state.plane);
655

656 657
	fbc->active = false;

658
	fbc->funcs->deactivate(fbc);
659 660
}

661
static bool intel_fbc_is_compressing(struct intel_fbc *fbc)
662
{
663
	return fbc->funcs->is_compressing(fbc);
664 665
}

666
static void intel_fbc_nuke(struct intel_fbc *fbc)
667
{
V
Ville Syrjälä 已提交
668
	trace_intel_fbc_nuke(fbc->state.plane);
669

670
	fbc->funcs->nuke(fbc);
671 672
}

673
static void intel_fbc_activate(struct intel_fbc *fbc)
674
{
675 676
	intel_fbc_hw_activate(fbc);
	intel_fbc_nuke(fbc);
V
Ville Syrjälä 已提交
677 678

	fbc->no_fbc_reason = NULL;
679 680
}

681
static void intel_fbc_deactivate(struct intel_fbc *fbc, const char *reason)
P
Paulo Zanoni 已提交
682
{
683
	struct drm_i915_private *i915 = fbc->i915;
684

V
Ville Syrjälä 已提交
685
	drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock));
P
Paulo Zanoni 已提交
686

687
	if (fbc->active)
688
		intel_fbc_hw_deactivate(fbc);
689 690

	fbc->no_fbc_reason = reason;
691 692
}

693 694
static u64 intel_fbc_cfb_base_max(struct drm_i915_private *i915)
{
695
	if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915))
696 697 698 699 700
		return BIT_ULL(28);
	else
		return BIT_ULL(32);
}

V
Ville Syrjälä 已提交
701
static u64 intel_fbc_stolen_end(struct drm_i915_private *i915)
702
{
703 704 705 706 707 708
	u64 end;

	/* The FBC hardware for BDW/SKL doesn't have access to the stolen
	 * reserved range size, so it always assumes the maximum (8MB) is used.
	 * If we enable FBC using a CFB on that memory range we'll get FIFO
	 * underruns, even if that range is not reserved by the BIOS. */
V
Ville Syrjälä 已提交
709 710 711
	if (IS_BROADWELL(i915) ||
	    (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)))
		end = resource_size(&i915->dsm) - 8 * 1024 * 1024;
712
	else
713
		end = U64_MAX;
714

V
Ville Syrjälä 已提交
715
	return min(end, intel_fbc_cfb_base_max(i915));
716 717
}

718
static int intel_fbc_min_limit(const struct intel_plane_state *plane_state)
719
{
V
Ville Syrjälä 已提交
720
	return plane_state->hw.fb->format->cpp[0] == 2 ? 2 : 1;
721 722
}

V
Ville Syrjälä 已提交
723
static int intel_fbc_max_limit(struct drm_i915_private *i915)
724 725
{
	/* WaFbcOnly1to1Ratio:ctg */
V
Ville Syrjälä 已提交
726
	if (IS_G4X(i915))
727 728
		return 1;

729 730 731 732
	/*
	 * FBC2 can only do 1:1, 1:2, 1:4, we limit
	 * FBC1 to the same out of convenience.
	 */
733
	return 4;
734 735
}

736
static int find_compression_limit(struct intel_fbc *fbc,
737
				  unsigned int size, int min_limit)
738
{
739
	struct drm_i915_private *i915 = fbc->i915;
V
Ville Syrjälä 已提交
740
	u64 end = intel_fbc_stolen_end(i915);
741 742 743
	int ret, limit = min_limit;

	size /= limit;
744 745

	/* Try to over-allocate to reduce reallocations and fragmentation. */
V
Ville Syrjälä 已提交
746
	ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
747
						   size <<= 1, 4096, 0, end);
748
	if (ret == 0)
749
		return limit;
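
	/*
	 * Could not over-allocate at the minimum compression limit: fall back
	 * to trying the exact size, then keep halving the CFB size (i.e.
	 * doubling the compression limit) until the allocation fits in stolen
	 * memory or the maximum limit is exceeded.
	 */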

	for (; limit <= intel_fbc_max_limit(i915); limit <<= 1) {
		ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
753 754 755
							   size >>= 1, 4096, 0, end);
		if (ret == 0)
			return limit;
756
	}
757 758

	return 0;
759 760
}

761
static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
762
			       unsigned int size, int min_limit)
763
{
764
	struct drm_i915_private *i915 = fbc->i915;
765
	int ret;
766

V
Ville Syrjälä 已提交
767
	drm_WARN_ON(&i915->drm,
768
		    drm_mm_node_allocated(&fbc->compressed_fb));
V
Ville Syrjälä 已提交
769
	drm_WARN_ON(&i915->drm,
770
		    drm_mm_node_allocated(&fbc->compressed_llb));
771

V
Ville Syrjälä 已提交
772 773
	if (DISPLAY_VER(i915) < 5 && !IS_G4X(i915)) {
		ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb,
774 775 776 777 778
						  4096, 4096);
		if (ret)
			goto err;
	}

779
	ret = find_compression_limit(fbc, size, min_limit);
780 781
	if (!ret)
		goto err_llb;
782
	else if (ret > min_limit)
V
Ville Syrjälä 已提交
783
		drm_info_once(&i915->drm,
784
			      "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
785

786
	fbc->limit = ret;
787

V
Ville Syrjälä 已提交
788
	drm_dbg_kms(&i915->drm,
789 790
		    "reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n",
		    fbc->compressed_fb.size, fbc->limit);
791 792 793 794

	return 0;

err_llb:
795
	if (drm_mm_node_allocated(&fbc->compressed_llb))
V
Ville Syrjälä 已提交
796
		i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
797
err:
V
Ville Syrjälä 已提交
798 799
	if (drm_mm_initialized(&i915->mm.stolen))
		drm_info_once(&i915->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
800 801 802
	return -ENOSPC;
}

803
static void intel_fbc_program_cfb(struct intel_fbc *fbc)
804
{
805
	fbc->funcs->program_cfb(fbc);
806 807
}

808
static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
809
{
810
	struct drm_i915_private *i915 = fbc->i915;
811

812
	if (WARN_ON(intel_fbc_hw_is_active(fbc)))
813 814
		return;

815
	if (drm_mm_node_allocated(&fbc->compressed_llb))
V
Ville Syrjälä 已提交
816
		i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
817
	if (drm_mm_node_allocated(&fbc->compressed_fb))
V
Ville Syrjälä 已提交
818
		i915_gem_stolen_remove_node(i915, &fbc->compressed_fb);
819 820
}

821
void intel_fbc_cleanup(struct drm_i915_private *i915)
P
Paulo Zanoni 已提交
822
{
823 824
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;
825

826 827 828 829
	for_each_intel_fbc(i915, fbc, fbc_id) {
		mutex_lock(&fbc->lock);
		__intel_fbc_cleanup_cfb(fbc);
		mutex_unlock(&fbc->lock);
830

831 832
		kfree(fbc);
	}
P
Paulo Zanoni 已提交
833 834
}

835
static bool stride_is_valid(const struct intel_plane_state *plane_state)
836
{
837 838 839 840 841
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int stride = intel_fbc_plane_stride(plane_state) *
		fb->format->cpp[0];

842
	/* This should have been caught earlier. */
V
Ville Syrjälä 已提交
843
	if (drm_WARN_ON_ONCE(&i915->drm, (stride & (64 - 1)) != 0))
844
		return false;
845 846

	/* Below are the additional FBC restrictions. */
847 848
	if (stride < 512)
		return false;
849

V
Ville Syrjälä 已提交
850
	if (DISPLAY_VER(i915) == 2 || DISPLAY_VER(i915) == 3)
851 852
		return stride == 4096 || stride == 8192;

V
Ville Syrjälä 已提交
853
	if (DISPLAY_VER(i915) == 4 && !IS_G4X(i915) && stride < 2048)
854 855
		return false;

856
	/* Display WA #1105: skl,bxt,kbl,cfl,glk */
V
Ville Syrjälä 已提交
857
	if ((DISPLAY_VER(i915) == 9 || IS_GEMINILAKE(i915)) &&
858
	    fb->modifier == DRM_FORMAT_MOD_LINEAR && stride & 511)
859 860
		return false;

861 862 863 864 865 866
	if (stride > 16384)
		return false;

	return true;
}

867
static bool pixel_format_is_valid(const struct intel_plane_state *plane_state)
868
{
869 870 871 872
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	switch (fb->format->format) {
873 874 875 876 877 878
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
		return true;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_RGB565:
		/* 16bpp not supported on gen2 */
V
Ville Syrjälä 已提交
879
		if (DISPLAY_VER(i915) == 2)
880 881
			return false;
		/* WaFbcOnly1to1Ratio:ctg */
V
Ville Syrjälä 已提交
882
		if (IS_G4X(i915))
883 884 885 886 887 888 889
			return false;
		return true;
	default:
		return false;
	}
}

890
static bool rotation_is_valid(const struct intel_plane_state *plane_state)
891
{
892 893 894 895 896
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;

	if (DISPLAY_VER(i915) >= 9 && fb->format->format == DRM_FORMAT_RGB565 &&
897 898
	    drm_rotation_90_or_270(rotation))
		return false;
V
Ville Syrjälä 已提交
899
	else if (DISPLAY_VER(i915) <= 4 && !IS_G4X(i915) &&
900 901 902 903 904 905
		 rotation != DRM_MODE_ROTATE_0)
		return false;

	return true;
}

906 907 908
/*
 * For some reason, the hardware tracking starts looking at whatever we
 * programmed as the display plane base address register. It does not look at
909 910
 * the X and Y offset registers. That's why we include the src x/y offsets
 * instead of just looking at the plane size.
911
 */
912
static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *plane_state)
913
{
914
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
915
	unsigned int effective_w, effective_h, max_w, max_h;
916

V
Ville Syrjälä 已提交
917
	if (DISPLAY_VER(i915) >= 10) {
918 919
		max_w = 5120;
		max_h = 4096;
V
Ville Syrjälä 已提交
920
	} else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) {
921 922
		max_w = 4096;
		max_h = 4096;
V
Ville Syrjälä 已提交
923
	} else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) {
924 925 926 927 928 929 930
		max_w = 4096;
		max_h = 2048;
	} else {
		max_w = 2048;
		max_h = 1536;
	}

931 932 933 934
	effective_w = plane_state->view.color_plane[0].x +
		(drm_rect_width(&plane_state->uapi.src) >> 16);
	effective_h = plane_state->view.color_plane[0].y +
		(drm_rect_height(&plane_state->uapi.src) >> 16);
935 936

	return effective_w <= max_w && effective_h <= max_h;
937 938
}

939
static bool tiling_is_valid(const struct intel_plane_state *plane_state)
940
{
941 942 943 944
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	switch (fb->modifier) {
945 946
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_Y_TILED:
947
	case I915_FORMAT_MOD_Yf_TILED:
V
Ville Syrjälä 已提交
948
		return DISPLAY_VER(i915) >= 9;
949
	case I915_FORMAT_MOD_4_TILED:
950
	case I915_FORMAT_MOD_X_TILED:
951 952 953 954 955 956
		return true;
	default:
		return false;
	}
}

V
Ville Syrjälä 已提交
957 958 959
static void intel_fbc_update_state(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_plane *plane)
960
{
961 962 963 964 965 966
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	struct intel_fbc *fbc = plane->fbc;
V
Ville Syrjälä 已提交
967
	struct intel_fbc_state *fbc_state = &fbc->state;
968

V
Ville Syrjälä 已提交
969
	WARN_ON(plane_state->no_fbc_reason);
970

V
Ville Syrjälä 已提交
971
	fbc_state->plane = plane;
972

973
	/* FBC1 compression interval: arbitrary choice of 1 second */
V
Ville Syrjälä 已提交
974
	fbc_state->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode);
975

V
Ville Syrjälä 已提交
976
	fbc_state->fence_y_offset = intel_plane_fence_y_offset(plane_state);
977

V
Ville Syrjälä 已提交
978
	drm_WARN_ON(&i915->drm, plane_state->flags & PLANE_HAS_FENCE &&
979
		    !plane_state->ggtt_vma->fence);
980 981

	if (plane_state->flags & PLANE_HAS_FENCE &&
982
	    plane_state->ggtt_vma->fence)
V
Ville Syrjälä 已提交
983
		fbc_state->fence_id = plane_state->ggtt_vma->fence->id;
984
	else
V
Ville Syrjälä 已提交
985
		fbc_state->fence_id = -1;
986

V
Ville Syrjälä 已提交
987 988 989
	fbc_state->cfb_stride = intel_fbc_cfb_stride(plane_state);
	fbc_state->cfb_size = intel_fbc_cfb_size(plane_state);
	fbc_state->override_cfb_stride = intel_fbc_override_cfb_stride(plane_state);
990 991
}

V
Ville Syrjälä 已提交
static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);

	/* The use of a CPU fence is one of two ways to detect writes by the
	 * CPU to the scanout and trigger updates to the FBC.
	 *
	 * The other method is software tracking (see
	 * intel_fbc_invalidate/flush()), which manually notifies FBC and nukes
	 * the current compressed buffer and recompresses it.
	 *
	 * Note that it is possible for a tiled surface to be unmappable (and
	 * so have no fence associated with it) due to aperture constraints
	 * at the time of pinning.
	 *
	 * FIXME with 90/270 degree rotation we should use the fence on
	 * the normal GTT view (the rotated view doesn't even have a
	 * fence). Would need changes to the FBC fence Y offset as well.
	 * For now this will effectively disable FBC with 90/270 degree
	 * rotation.
	 */
	return DISPLAY_VER(i915) >= 9 ||
		(plane_state->flags & PLANE_HAS_FENCE &&
		 plane_state->ggtt_vma->fence);
}

static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct intel_fbc *fbc = plane->fbc;
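
	/*
	 * compressed_fb was allocated with the CFB size scaled down by the
	 * chosen limit, so scale it back up here when checking whether the
	 * new plane's CFB still fits in the existing allocation.
	 */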

	return intel_fbc_min_limit(plane_state) <= fbc->limit &&
		intel_fbc_cfb_size(plane_state) <= fbc->compressed_fb.size * fbc->limit;
}

static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state)
1028
{
V
Ville Syrjälä 已提交
1029 1030 1031
	return !plane_state->no_fbc_reason &&
		intel_fbc_is_fence_ok(plane_state) &&
		intel_fbc_is_cfb_ok(plane_state);
1032 1033
}

static int intel_fbc_check_plane(struct intel_atomic_state *state,
				 struct intel_plane *plane)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc);
	const struct intel_crtc_state *crtc_state;
	struct intel_fbc *fbc = plane->fbc;

	if (!fbc)
		return 0;

	if (intel_vgpu_active(i915)) {
		plane_state->no_fbc_reason = "VGPU active";
		return 0;
	}

	if (!i915->params.enable_fbc) {
		plane_state->no_fbc_reason = "disabled per module param or by default";
		return 0;
	}

	if (!plane_state->uapi.visible) {
		plane_state->no_fbc_reason = "plane not visible";
		return 0;
	}

	crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		plane_state->no_fbc_reason = "interlaced mode not supported";
		return 0;
	}

	if (crtc_state->double_wide) {
		plane_state->no_fbc_reason = "double wide pipe not supported";
		return 0;
	}

	/*
	 * Display 12+ does not support FBC with PSR2.
	 * The recommendation is to keep this combination disabled.
	 * Bspec: 50422 HSD: 14010260002
	 */
	if (DISPLAY_VER(i915) >= 12 && crtc_state->has_psr2) {
		plane_state->no_fbc_reason = "PSR2 enabled";
		return false;
	}

	if (!pixel_format_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "pixel format not supported";
		return 0;
	}

	if (!tiling_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "tiling not supported";
		return 0;
	}

	if (!rotation_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "rotation not supported";
		return 0;
	}

	if (!stride_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "stride not supported";
		return 0;
	}

	if (plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
	    fb->format->has_alpha) {
		plane_state->no_fbc_reason = "per-pixel alpha not supported";
		return false;
	}

	if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
		plane_state->no_fbc_reason = "plane size too big";
		return 0;
	}

	/*
	 * Work around a problem on GEN9+ HW, where enabling FBC on a plane
	 * having a Y offset that isn't divisible by 4 causes FIFO underrun
	 * and screen flicker.
	 */
	if (DISPLAY_VER(i915) >= 9 &&
	    plane_state->view.color_plane[0].y & 3) {
		plane_state->no_fbc_reason = "plane start Y offset misaligned";
		return false;
	}

	/* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
	if (DISPLAY_VER(i915) >= 11 &&
1129 1130
	    (plane_state->view.color_plane[0].y +
	     (drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) {
		plane_state->no_fbc_reason = "plane end Y offset misaligned";
		return false;
	}

	/* WaFbcExceedCdClockThreshold:hsw,bdw */
	if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		const struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		if (crtc_state->pixel_rate >= cdclk_state->logical.cdclk * 95 / 100) {
			plane_state->no_fbc_reason = "pixel rate too high";
			return 0;
		}
	}

	plane_state->no_fbc_reason = NULL;

	return 0;
}

1154

1155 1156 1157
static bool intel_fbc_can_flip_nuke(struct intel_atomic_state *state,
				    struct intel_crtc *crtc,
				    struct intel_plane *plane)
1158
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *old_plane_state =
		intel_atomic_get_old_plane_state(state, plane);
	const struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	const struct drm_framebuffer *old_fb = old_plane_state->hw.fb;
	const struct drm_framebuffer *new_fb = new_plane_state->hw.fb;
1167

1168
	if (drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi))
1169 1170
		return false;

V
Ville Syrjälä 已提交
1171 1172
	if (!intel_fbc_is_ok(old_plane_state) ||
	    !intel_fbc_is_ok(new_plane_state))
1173 1174 1175
		return false;

	if (old_fb->format->format != new_fb->format->format)
1176 1177
		return false;

1178
	if (old_fb->modifier != new_fb->modifier)
1179 1180
		return false;

1181 1182
	if (intel_fbc_plane_stride(old_plane_state) !=
	    intel_fbc_plane_stride(new_plane_state))
1183 1184
		return false;

V
Ville Syrjälä 已提交
1185 1186
	if (intel_fbc_cfb_stride(old_plane_state) !=
	    intel_fbc_cfb_stride(new_plane_state))
1187 1188
		return false;

V
Ville Syrjälä 已提交
1189 1190
	if (intel_fbc_cfb_size(old_plane_state) !=
	    intel_fbc_cfb_size(new_plane_state))
1191 1192
		return false;

V
Ville Syrjälä 已提交
1193 1194
	if (intel_fbc_override_cfb_stride(old_plane_state) !=
	    intel_fbc_override_cfb_stride(new_plane_state))
1195 1196 1197 1198 1199
		return false;

	return true;
}

1200 1201 1202
static bool __intel_fbc_pre_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_plane *plane)
1203
{
1204
	struct drm_i915_private *i915 = to_i915(state->base.dev);
1205
	struct intel_fbc *fbc = plane->fbc;
1206
	bool need_vblank_wait = false;
1207

1208
	fbc->flip_pending = true;
1209

	if (intel_fbc_can_flip_nuke(state, crtc, plane))
		return need_vblank_wait;

	intel_fbc_deactivate(fbc, "update pending");

	/*
	 * Display WA #1198: glk+
	 * Need an extra vblank wait between FBC disable and most plane
	 * updates. Bspec says this is only needed for plane disable, but
	 * that is not true. Touching most plane registers will cause the
	 * corruption to appear. Also SKL/derivatives do not seem to be
	 * affected.
	 *
	 * TODO: could optimize this a bit by sampling the frame
	 * counter when we disable FBC (if it was already done earlier)
	 * and skipping the extra vblank wait before the plane update
	 * if at least one frame has already passed.
	 */
	if (fbc->activated && DISPLAY_VER(i915) >= 10)
		need_vblank_wait = true;
	fbc->activated = false;

	return need_vblank_wait;
}

bool intel_fbc_pre_update(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
{
	const struct intel_plane_state *plane_state;
	bool need_vblank_wait = false;
	struct intel_plane *plane;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_fbc *fbc = plane->fbc;

		if (!fbc || plane->pipe != crtc->pipe)
			continue;

		mutex_lock(&fbc->lock);

V
Ville Syrjälä 已提交
1251
		if (fbc->state.plane == plane)
1252 1253 1254 1255
			need_vblank_wait |= __intel_fbc_pre_update(state, crtc, plane);

		mutex_unlock(&fbc->lock);
	}
1256 1257

	return need_vblank_wait;
1258 1259
}

1260
static void __intel_fbc_disable(struct intel_fbc *fbc)
1261
{
1262
	struct drm_i915_private *i915 = fbc->i915;
V
Ville Syrjälä 已提交
1263
	struct intel_plane *plane = fbc->state.plane;
1264

V
Ville Syrjälä 已提交
1265 1266
	drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock));
	drm_WARN_ON(&i915->drm, fbc->active);
1267

1268 1269
	drm_dbg_kms(&i915->drm, "Disabling FBC on [PLANE:%d:%s]\n",
		    plane->base.base.id, plane->base.name);
1270

1271
	__intel_fbc_cleanup_cfb(fbc);
1272

V
Ville Syrjälä 已提交
1273
	fbc->state.plane = NULL;
1274 1275
}

1276
static void __intel_fbc_post_update(struct intel_fbc *fbc)
1277
{
1278
	struct drm_i915_private *i915 = fbc->i915;
1279

V
Ville Syrjälä 已提交
1280
	drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock));
1281

1282
	if (!fbc->busy_bits)
1283
		intel_fbc_activate(fbc);
1284
	else
1285
		intel_fbc_deactivate(fbc, "frontbuffer write");
P
Paulo Zanoni 已提交
1286 1287
}

1288 1289
void intel_fbc_post_update(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
P
Paulo Zanoni 已提交
1290
{
1291 1292 1293
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;
1294

1295 1296
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_fbc *fbc = plane->fbc;
1297

1298 1299 1300 1301 1302
		if (!fbc || plane->pipe != crtc->pipe)
			continue;

		mutex_lock(&fbc->lock);

V
Ville Syrjälä 已提交
1303
		if (fbc->state.plane == plane) {
1304 1305 1306 1307 1308
			fbc->flip_pending = false;
			__intel_fbc_post_update(fbc);
		}

		mutex_unlock(&fbc->lock);
1309
	}
1310 1311
}

1312 1313
static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
{
V
Ville Syrjälä 已提交
1314 1315
	if (fbc->state.plane)
		return fbc->state.plane->frontbuffer_bit;
1316 1317 1318 1319
	else
		return fbc->possible_framebuffer_bits;
}

1320 1321 1322
static void __intel_fbc_invalidate(struct intel_fbc *fbc,
				   unsigned int frontbuffer_bits,
				   enum fb_op_origin origin)
1323
{
1324
	if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
1325 1326
		return;

1327
	mutex_lock(&fbc->lock);
P
Paulo Zanoni 已提交
1328

1329
	fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;
1330

V
Ville Syrjälä 已提交
1331
	if (fbc->state.plane && fbc->busy_bits)
1332
		intel_fbc_deactivate(fbc, "frontbuffer write");
P
Paulo Zanoni 已提交
1333

1334
	mutex_unlock(&fbc->lock);
1335 1336
}

1337 1338 1339
void intel_fbc_invalidate(struct drm_i915_private *i915,
			  unsigned int frontbuffer_bits,
			  enum fb_op_origin origin)
1340
{
1341 1342
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;
1343

1344 1345 1346 1347
	for_each_intel_fbc(i915, fbc, fbc_id)
		__intel_fbc_invalidate(fbc, frontbuffer_bits, origin);

}
1348

1349 1350 1351 1352
static void __intel_fbc_flush(struct intel_fbc *fbc,
			      unsigned int frontbuffer_bits,
			      enum fb_op_origin origin)
{
1353
	mutex_lock(&fbc->lock);
1354

1355
	fbc->busy_bits &= ~frontbuffer_bits;
1356

1357
	if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
1358 1359
		goto out;

V
Ville Syrjälä 已提交
1360
	if (!fbc->busy_bits && fbc->state.plane &&
1361
	    (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
1362
		if (fbc->active)
1363
			intel_fbc_nuke(fbc);
1364
		else if (!fbc->flip_pending)
1365
			__intel_fbc_post_update(fbc);
1366
	}
P
Paulo Zanoni 已提交
1367

1368
out:
1369
	mutex_unlock(&fbc->lock);
1370 1371
}

void intel_fbc_flush(struct drm_i915_private *i915,
		     unsigned int frontbuffer_bits,
		     enum fb_op_origin origin)
{
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;

	for_each_intel_fbc(i915, fbc, fbc_id)
		__intel_fbc_flush(fbc, frontbuffer_bits, origin);
}

1383
int intel_fbc_atomic_check(struct intel_atomic_state *state)
1384
{
1385
	struct intel_plane_state *plane_state;
1386
	struct intel_plane *plane;
1387
	int i;
1388

1389
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1390
		int ret;
1391

1392 1393 1394
		ret = intel_fbc_check_plane(state, plane);
		if (ret)
			return ret;
1395 1396
	}

1397
	return 0;
1398 1399
}

1400 1401 1402
static void __intel_fbc_enable(struct intel_atomic_state *state,
			       struct intel_crtc *crtc,
			       struct intel_plane *plane)
1403
{
1404
	struct drm_i915_private *i915 = to_i915(state->base.dev);
1405 1406
	const struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
1407
	struct intel_fbc *fbc = plane->fbc;
1408

V
Ville Syrjälä 已提交
1409 1410
	if (fbc->state.plane) {
		if (fbc->state.plane != plane)
1411
			return;
1412

1413 1414
		if (intel_fbc_is_ok(plane_state)) {
			intel_fbc_update_state(state, crtc, plane);
1415
			return;
1416
		}
1417

1418
		__intel_fbc_disable(fbc);
1419
	}
1420

V
Ville Syrjälä 已提交
1421
	drm_WARN_ON(&i915->drm, fbc->active);
1422

V
Ville Syrjälä 已提交
1423 1424 1425
	fbc->no_fbc_reason = plane_state->no_fbc_reason;
	if (fbc->no_fbc_reason)
		return;
1426

V
Ville Syrjälä 已提交
1427 1428
	if (!intel_fbc_is_fence_ok(plane_state)) {
		fbc->no_fbc_reason = "framebuffer not fenced";
1429
		return;
V
Ville Syrjälä 已提交
1430
	}
1431

1432 1433 1434 1435 1436
	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "FIFO underrun";
		return;
	}

V
Ville Syrjälä 已提交
1437 1438
	if (intel_fbc_alloc_cfb(fbc, intel_fbc_cfb_size(plane_state),
				intel_fbc_min_limit(plane_state))) {
1439
		fbc->no_fbc_reason = "not enough stolen memory";
1440
		return;
1441 1442
	}

1443 1444
	drm_dbg_kms(&i915->drm, "Enabling FBC on [PLANE:%d:%s]\n",
		    plane->base.base.id, plane->base.name);
1445
	fbc->no_fbc_reason = "FBC enabled but not active yet\n";
1446

V
Ville Syrjälä 已提交
1447
	intel_fbc_update_state(state, crtc, plane);
1448

1449
	intel_fbc_program_cfb(fbc);
1450 1451 1452
}

/**
1453
 * intel_fbc_disable - disable FBC if it's associated with crtc
1454 1455 1456 1457
 * @crtc: the CRTC
 *
 * This function disables FBC if it's associated with the provided CRTC.
 */
1458
void intel_fbc_disable(struct intel_crtc *crtc)
1459
{
1460 1461
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_plane *plane;
1462

1463 1464
	for_each_intel_plane(&i915->drm, plane) {
		struct intel_fbc *fbc = plane->fbc;
1465

1466 1467 1468 1469
		if (!fbc || plane->pipe != crtc->pipe)
			continue;

		mutex_lock(&fbc->lock);
V
Ville Syrjälä 已提交
1470
		if (fbc->state.plane == plane)
1471 1472 1473
			__intel_fbc_disable(fbc);
		mutex_unlock(&fbc->lock);
	}
1474 1475
}

1476 1477 1478 1479 1480
void intel_fbc_update(struct intel_atomic_state *state,
		      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
1481 1482 1483
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int i;
1484

1485 1486
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_fbc *fbc = plane->fbc;
1487

1488 1489 1490 1491 1492 1493
		if (!fbc || plane->pipe != crtc->pipe)
			continue;

		mutex_lock(&fbc->lock);

		if (crtc_state->update_pipe && plane_state->no_fbc_reason) {
V
Ville Syrjälä 已提交
1494
			if (fbc->state.plane == plane)
1495 1496 1497 1498 1499 1500 1501
				__intel_fbc_disable(fbc);
		} else {
			__intel_fbc_enable(state, crtc, plane);
		}

		mutex_unlock(&fbc->lock);
	}
1502 1503
}

1504
/**
1505
 * intel_fbc_global_disable - globally disable FBC
V
Ville Syrjälä 已提交
1506
 * @i915: i915 device instance
1507 1508 1509
 *
 * This function disables FBC regardless of which CRTC is associated with it.
 */
V
Ville Syrjälä 已提交
1510
void intel_fbc_global_disable(struct drm_i915_private *i915)
1511
{
1512 1513
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;
1514

1515 1516 1517 1518 1519 1520
	for_each_intel_fbc(i915, fbc, fbc_id) {
		mutex_lock(&fbc->lock);
		if (fbc->state.plane)
			__intel_fbc_disable(fbc);
		mutex_unlock(&fbc->lock);
	}
1521 1522
}

1523 1524
static void intel_fbc_underrun_work_fn(struct work_struct *work)
{
1525 1526
	struct intel_fbc *fbc = container_of(work, typeof(*fbc), underrun_work);
	struct drm_i915_private *i915 = fbc->i915;
1527 1528 1529 1530

	mutex_lock(&fbc->lock);

	/* Maybe we were scheduled twice. */
V
Ville Syrjälä 已提交
1531
	if (fbc->underrun_detected || !fbc->state.plane)
1532 1533
		goto out;

V
Ville Syrjälä 已提交
1534
	drm_dbg_kms(&i915->drm, "Disabling FBC due to FIFO underrun.\n");
1535 1536
	fbc->underrun_detected = true;

1537
	intel_fbc_deactivate(fbc, "FIFO underrun");
1538
	if (!fbc->flip_pending)
V
Ville Syrjälä 已提交
1539
		intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(i915, fbc->state.plane->pipe));
1540
	__intel_fbc_disable(fbc);
1541 1542 1543 1544
out:
	mutex_unlock(&fbc->lock);
}

1545
static void __intel_fbc_reset_underrun(struct intel_fbc *fbc)
1546
{
1547
	struct drm_i915_private *i915 = fbc->i915;
1548

1549
	cancel_work_sync(&fbc->underrun_work);
1550

1551
	mutex_lock(&fbc->lock);
1552

1553
	if (fbc->underrun_detected) {
V
Ville Syrjälä 已提交
1554
		drm_dbg_kms(&i915->drm,
1555
			    "Re-allowing FBC after fifo underrun\n");
1556
		fbc->no_fbc_reason = "FIFO underrun cleared";
1557 1558
	}

1559 1560
	fbc->underrun_detected = false;
	mutex_unlock(&fbc->lock);
1561 1562
}

/*
 * intel_fbc_reset_underrun - reset FBC fifo underrun status.
 * @i915: the i915 device
 *
 * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
 * want to re-enable FBC after an underrun to increase test coverage.
 */
void intel_fbc_reset_underrun(struct drm_i915_private *i915)
{
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;

	for_each_intel_fbc(i915, fbc, fbc_id)
		__intel_fbc_reset_underrun(fbc);
}

static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
{
	/*
	 * There's no guarantee that underrun_detected won't be set to true
	 * right after this check and before the work is scheduled, but that's
	 * not a problem since we'll check it again under the work function
	 * while FBC is locked. This check here is just to prevent us from
	 * unnecessarily scheduling the work, and it relies on the fact that we
	 * never switch underrun_detected back to false after it's true.
	 */
	if (READ_ONCE(fbc->underrun_detected))
		return;

	schedule_work(&fbc->underrun_work);
}

1595 1596
/**
 * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
1597
 * @i915: i915 device
 *
 * Without FBC, most underruns are harmless and don't really cause too many
 * problems, except for an annoying message on dmesg. With FBC, underruns can
 * become black screens or even worse, especially when paired with bad
 * watermarks. So in order for us to be on the safe side, completely disable FBC
 * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
 * already suggests that watermarks may be bad, so try to be as safe as
 * possible.
 *
 * This function is called from the IRQ handler.
 */
1609
void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915)
1610
{
1611 1612
	struct intel_fbc *fbc;
	enum intel_fbc_id fbc_id;
1613

1614 1615
	for_each_intel_fbc(i915, fbc, fbc_id)
		__intel_fbc_handle_fifo_underrun_irq(fbc);
1616 1617
}

/*
 * The DDX driver changes its behavior depending on the value it reads from
 * i915.enable_fbc, so sanitize it by translating the default value into either
 * 0 or 1 in order to allow it to know what's going on.
 *
 * Notice that this is done at driver initialization and we still allow user
 * space to change the value during runtime without sanitizing it again. IGT
 * relies on being able to change i915.enable_fbc at runtime.
 */
V
Ville Syrjälä 已提交
1627
static int intel_sanitize_fbc_option(struct drm_i915_private *i915)
1628
{
V
Ville Syrjälä 已提交
1629 1630
	if (i915->params.enable_fbc >= 0)
		return !!i915->params.enable_fbc;
1631

V
Ville Syrjälä 已提交
1632
	if (!HAS_FBC(i915))
1633 1634
		return 0;

V
Ville Syrjälä 已提交
1635
	if (IS_BROADWELL(i915) || DISPLAY_VER(i915) >= 9)
1636 1637 1638 1639 1640
		return 1;

	return 0;
}

V
Ville Syrjälä 已提交
1641
static bool need_fbc_vtd_wa(struct drm_i915_private *i915)
1642 1643
{
	/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
1644
	if (intel_vtd_active(i915) &&
V
Ville Syrjälä 已提交
1645 1646
	    (IS_SKYLAKE(i915) || IS_BROXTON(i915))) {
		drm_info(&i915->drm,
1647
			 "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
1648 1649 1650 1651 1652 1653
		return true;
	}

	return false;
}

void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane)
{
	if (!fbc)
		return;

	plane->fbc = fbc;
	fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
}

1663 1664
static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915,
					  enum intel_fbc_id fbc_id)
1665 1666 1667 1668 1669 1670 1671
{
	struct intel_fbc *fbc;

	fbc = kzalloc(sizeof(*fbc), GFP_KERNEL);
	if (!fbc)
		return NULL;

1672
	fbc->id = fbc_id;
	fbc->i915 = i915;
	INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
	mutex_init(&fbc->lock);

	if (DISPLAY_VER(i915) >= 7)
		fbc->funcs = &ivb_fbc_funcs;
	else if (DISPLAY_VER(i915) == 6)
		fbc->funcs = &snb_fbc_funcs;
	else if (DISPLAY_VER(i915) == 5)
		fbc->funcs = &ilk_fbc_funcs;
	else if (IS_G4X(i915))
		fbc->funcs = &g4x_fbc_funcs;
	else if (DISPLAY_VER(i915) == 4)
		fbc->funcs = &i965_fbc_funcs;
	else
		fbc->funcs = &i8xx_fbc_funcs;

	return fbc;
}

R
Rodrigo Vivi 已提交
1693 1694
/**
 * intel_fbc_init - Initialize FBC
V
Ville Syrjälä 已提交
1695
 * @i915: the i915 device
R
Rodrigo Vivi 已提交
1696 1697 1698
 *
 * This function might be called during PM init process.
 */
V
Ville Syrjälä 已提交
1699
void intel_fbc_init(struct drm_i915_private *i915)
1700
{
1701
	enum intel_fbc_id fbc_id;
P
Paulo Zanoni 已提交
1702

V
Ville Syrjälä 已提交
1703
	if (!drm_mm_initialized(&i915->mm.stolen))
1704
		mkwrite_device_info(i915)->display.fbc_mask = 0;
1705

V
Ville Syrjälä 已提交
1706
	if (need_fbc_vtd_wa(i915))
1707
		mkwrite_device_info(i915)->display.fbc_mask = 0;
1708

V
Ville Syrjälä 已提交
1709 1710 1711
	i915->params.enable_fbc = intel_sanitize_fbc_option(i915);
	drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n",
		    i915->params.enable_fbc);
1712

1713 1714
	for_each_fbc_id(i915, fbc_id) {
		struct intel_fbc *fbc;
1715

1716 1717 1718
		fbc = intel_fbc_create(i915, fbc_id);
		if (!fbc)
			continue;
1719

1720 1721 1722 1723 1724 1725 1726
		/*
		 * We still don't have any sort of hardware state readout
		 * for FBC, so deactivate it in case the BIOS activated it
		 * to make sure software matches the hardware state.
		 */
		if (intel_fbc_hw_is_active(fbc))
			intel_fbc_hw_deactivate(fbc);
1727

1728 1729
		i915->fbc[fbc->id] = fbc;
	}
1730
}
1731 1732 1733 1734 1735

static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
{
	struct intel_fbc *fbc = m->private;
	struct drm_i915_private *i915 = fbc->i915;
1736
	struct intel_plane *plane;
1737 1738
	intel_wakeref_t wakeref;

1739 1740
	drm_modeset_lock_all(&i915->drm);

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	mutex_lock(&fbc->lock);

	if (fbc->active) {
		seq_puts(m, "FBC enabled\n");
		seq_printf(m, "Compressing: %s\n",
			   yesno(intel_fbc_is_compressing(fbc)));
	} else {
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
	}

	for_each_intel_plane(&i915->drm, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane->fbc != fbc)
			continue;

		seq_printf(m, "%c [PLANE:%d:%s]: %s\n",
			   fbc->state.plane == plane ? '*' : ' ',
			   plane->base.base.id, plane->base.name,
			   plane_state->no_fbc_reason ?: "FBC possible");
	}

1765 1766 1767
	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

1768 1769
	drm_modeset_unlock_all(&i915->drm);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(intel_fbc_debugfs_status);
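
/*
 * Debugfs knobs for the FBC "false color" debug feature, which makes the
 * hardware visually mark compressed portions of the frame so compression
 * activity can be observed on screen.
 */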

static int intel_fbc_debugfs_false_color_get(void *data, u64 *val)
{
	struct intel_fbc *fbc = data;

	*val = fbc->false_color;

	return 0;
}

static int intel_fbc_debugfs_false_color_set(void *data, u64 val)
{
	struct intel_fbc *fbc = data;

	mutex_lock(&fbc->lock);

	fbc->false_color = val;

	if (fbc->active)
		fbc->funcs->set_false_color(fbc, fbc->false_color);

	mutex_unlock(&fbc->lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(intel_fbc_debugfs_false_color_fops,
			intel_fbc_debugfs_false_color_get,
			intel_fbc_debugfs_false_color_set,
			"%llu\n");

1805 1806
static void intel_fbc_debugfs_add(struct intel_fbc *fbc,
				  struct dentry *parent)
1807
{
1808 1809
	debugfs_create_file("i915_fbc_status", 0444, parent,
			    fbc, &intel_fbc_debugfs_status_fops);
1810 1811

	if (fbc->funcs->set_false_color)
1812 1813
		debugfs_create_file("i915_fbc_false_color", 0644, parent,
				    fbc, &intel_fbc_debugfs_false_color_fops);
1814 1815
}

void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc)
{
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);

	if (plane->fbc)
		intel_fbc_debugfs_add(plane->fbc, crtc->base.debugfs_entry);
}

/* FIXME: remove this once igt is on board with per-crtc stuff */
1825 1826
void intel_fbc_debugfs_register(struct drm_i915_private *i915)
{
1827 1828
	struct drm_minor *minor = i915->drm.primary;
	struct intel_fbc *fbc;
1829

1830
	fbc = i915->fbc[INTEL_FBC_A];
1831
	if (fbc)
1832
		intel_fbc_debugfs_add(fbc, minor->debugfs_root);
}