intel_fbc.c 46.2 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

R
Rodrigo Vivi 已提交
24 25 26 27 28 29
/**
 * DOC: Frame Buffer Compression (FBC)
 *
 * FBC tries to save memory bandwidth (and so power consumption) by
 * compressing the amount of memory used by the display. It is totally
 * transparent to user space and completely handled in the kernel.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns. It comes from keeping the memory footprint small
 * and having fewer memory pages opened and accessed for refreshing the display.
 *
 * i915 is responsible for reserving stolen memory for FBC and configuring its
 * offset on proper registers. The hardware takes care of all
 * compress/decompress. However there are many known cases where we have to
 * forcibly disable it to allow proper screen updates.
 */

41 42
#include <drm/drm_fourcc.h>

R
Rodrigo Vivi 已提交
43
#include "i915_drv.h"
44
#include "i915_trace.h"
45
#include "i915_vgpu.h"
46
#include "intel_cdclk.h"
47
#include "intel_de.h"
48
#include "intel_display_types.h"
49
#include "intel_fbc.h"
50
#include "intel_frontbuffer.h"
R
Rodrigo Vivi 已提交
51

52
struct intel_fbc_funcs {
53 54 55 56 57 58 59
	void (*activate)(struct intel_fbc *fbc);
	void (*deactivate)(struct intel_fbc *fbc);
	bool (*is_active)(struct intel_fbc *fbc);
	bool (*is_compressing)(struct intel_fbc *fbc);
	void (*nuke)(struct intel_fbc *fbc);
	void (*program_cfb)(struct intel_fbc *fbc);
	void (*set_false_color)(struct intel_fbc *fbc, bool enable);
60 61
};

62 63
/* plane stride in pixels */
static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane_state)
64
{
65 66 67
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int stride;

68
	stride = plane_state->view.color_plane[0].mapping_stride;
69 70 71 72 73 74 75
	if (!drm_rotation_90_or_270(plane_state->hw.rotation))
		stride /= fb->format->cpp[0];

	return stride;
}

/* plane stride based cfb stride in bytes, assuming 1:1 compression limit */
76
static unsigned int _intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
77 78 79
{
	unsigned int cpp = 4; /* FBC always 4 bytes per pixel */

80
	return intel_fbc_plane_stride(plane_state) * cpp;
81 82 83
}

/* minimum acceptable cfb stride in bytes, assuming 1:1 compression limit */
84
static unsigned int skl_fbc_min_cfb_stride(const struct intel_plane_state *plane_state)
85
{
86
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
87 88
	unsigned int limit = 4; /* 1:4 compression limit is the worst case */
	unsigned int cpp = 4; /* FBC always 4 bytes per pixel */
89
	unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16;
90 91 92 93
	unsigned int height = 4; /* FBC segment is 4 lines */
	unsigned int stride;

	/* minimum segment stride we can use */
94
	stride = width * cpp * height / limit;
95

96 97 98 99 100 101 102
	/*
	 * Wa_16011863758: icl+
	 * Avoid some hardware segment address miscalculation.
	 */
	if (DISPLAY_VER(i915) >= 11)
		stride += 64;

103 104 105 106 107 108 109 110 111 112 113
	/*
	 * At least some of the platforms require each 4 line segment to
	 * be 512 byte aligned. Just do it always for simplicity.
	 */
	stride = ALIGN(stride, 512);

	/* convert back to single line equivalent with 1:1 compression limit */
	return stride * limit / height;
}

/* properly aligned cfb stride in bytes, assuming 1:1 compression limit */
114
static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
115
{
116 117
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	unsigned int stride = _intel_fbc_cfb_stride(plane_state);
118 119 120 121 122 123

	/*
	 * At least some of the platforms require each 4 line segment to
	 * be 512 byte aligned. Aligning each line to 512 bytes guarantees
	 * that regardless of the compression limit we choose later.
	 */
124
	if (DISPLAY_VER(i915) >= 9)
125
		return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(plane_state));
126 127 128 129
	else
		return stride;
}

130
static unsigned int intel_fbc_cfb_size(const struct intel_plane_state *plane_state)
131
{
132 133
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	int lines = drm_rect_height(&plane_state->uapi.src) >> 16;
134

V
Ville Syrjälä 已提交
135
	if (DISPLAY_VER(i915) == 7)
136
		lines = min(lines, 2048);
V
Ville Syrjälä 已提交
137
	else if (DISPLAY_VER(i915) >= 8)
138
		lines = min(lines, 2560);
139

140
	return lines * intel_fbc_cfb_stride(plane_state);
141 142
}

143
static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_state)
144
{
145 146 147 148
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	unsigned int stride_aligned = intel_fbc_cfb_stride(plane_state);
	unsigned int stride = _intel_fbc_cfb_stride(plane_state);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
149 150 151 152 153 154 155 156 157

	/*
	 * Override stride in 64 byte units per 4 line segment.
	 *
	 * Gen9 hw miscalculates cfb stride for linear as
	 * PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so
	 * we always need to use the override there.
	 */
	if (stride != stride_aligned ||
158
	    (DISPLAY_VER(i915) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR))
159 160 161 162 163
		return stride_aligned * 4 / 64;

	return 0;
}

164
static u32 i8xx_fbc_ctl(struct intel_fbc *fbc)
165 166
{
	const struct intel_fbc_reg_params *params = &fbc->params;
167
	struct drm_i915_private *i915 = fbc->i915;
168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191
	unsigned int cfb_stride;
	u32 fbc_ctl;

	cfb_stride = params->cfb_stride / fbc->limit;

	/* FBC_CTL wants 32B or 64B units */
	if (DISPLAY_VER(i915) == 2)
		cfb_stride = (cfb_stride / 32) - 1;
	else
		cfb_stride = (cfb_stride / 64) - 1;

	fbc_ctl = FBC_CTL_PERIODIC |
		FBC_CTL_INTERVAL(params->interval) |
		FBC_CTL_STRIDE(cfb_stride);

	if (IS_I945GM(i915))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */

	if (params->fence_id >= 0)
		fbc_ctl |= FBC_CTL_FENCENO(params->fence_id);

	return fbc_ctl;
}

192
static u32 i965_fbc_ctl2(struct intel_fbc *fbc)
193
{
194
	const struct intel_fbc_reg_params *params = &fbc->params;
195 196 197 198 199 200
	u32 fbc_ctl2;

	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM |
		FBC_CTL_PLANE(params->crtc.i9xx_plane);

	if (params->fence_id >= 0)
201
		fbc_ctl2 |= FBC_CTL_CPU_FENCE_EN;
202 203 204 205

	return fbc_ctl2;
}

206
static void i8xx_fbc_deactivate(struct intel_fbc *fbc)
207
{
208
	struct drm_i915_private *i915 = fbc->i915;
209 210 211
	u32 fbc_ctl;

	/* Disable compression */
V
Ville Syrjälä 已提交
212
	fbc_ctl = intel_de_read(i915, FBC_CONTROL);
213 214 215 216
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
V
Ville Syrjälä 已提交
217
	intel_de_write(i915, FBC_CONTROL, fbc_ctl);
218 219

	/* Wait for compressing bit to clear */
V
Ville Syrjälä 已提交
220
	if (intel_de_wait_for_clear(i915, FBC_STATUS,
221
				    FBC_STAT_COMPRESSING, 10)) {
V
Ville Syrjälä 已提交
222
		drm_dbg_kms(&i915->drm, "FBC idle timed out\n");
223 224 225 226
		return;
	}
}

227
static void i8xx_fbc_activate(struct intel_fbc *fbc)
228
{
229
	const struct intel_fbc_reg_params *params = &fbc->params;
230
	struct drm_i915_private *i915 = fbc->i915;
231 232 233 234
	int i;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
V
Ville Syrjälä 已提交
235
		intel_de_write(i915, FBC_TAG(i), 0);
236

V
Ville Syrjälä 已提交
237 238
	if (DISPLAY_VER(i915) == 4) {
		intel_de_write(i915, FBC_CONTROL2,
239
			       i965_fbc_ctl2(fbc));
V
Ville Syrjälä 已提交
240
		intel_de_write(i915, FBC_FENCE_OFF,
241
			       params->fence_y_offset);
242 243
	}

V
Ville Syrjälä 已提交
244
	intel_de_write(i915, FBC_CONTROL,
245
		       FBC_CTL_EN | i8xx_fbc_ctl(fbc));
246 247
}

248
static bool i8xx_fbc_is_active(struct intel_fbc *fbc)
249
{
250
	return intel_de_read(fbc->i915, FBC_CONTROL) & FBC_CTL_EN;
251 252
}

253
static bool i8xx_fbc_is_compressing(struct intel_fbc *fbc)
254
{
255
	return intel_de_read(fbc->i915, FBC_STATUS) &
256 257 258
		(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
}

259
static void i8xx_fbc_nuke(struct intel_fbc *fbc)
260
{
261
	struct intel_fbc_reg_params *params = &fbc->params;
262
	enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane;
263
	struct drm_i915_private *dev_priv = fbc->i915;
264 265 266 267 268 269 270

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
			  intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane)));
	spin_unlock_irq(&dev_priv->uncore.lock);
}

271
static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
272
{
273
	struct drm_i915_private *i915 = fbc->i915;
274 275 276 277 278 279 280 281 282 283 284 285

	GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start,
					 fbc->compressed_fb.start, U32_MAX));
	GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start,
					 fbc->compressed_llb.start, U32_MAX));

	intel_de_write(i915, FBC_CFB_BASE,
		       i915->dsm.start + fbc->compressed_fb.start);
	intel_de_write(i915, FBC_LL_BASE,
		       i915->dsm.start + fbc->compressed_llb.start);
}

286 287 288 289 290
static const struct intel_fbc_funcs i8xx_fbc_funcs = {
	.activate = i8xx_fbc_activate,
	.deactivate = i8xx_fbc_deactivate,
	.is_active = i8xx_fbc_is_active,
	.is_compressing = i8xx_fbc_is_compressing,
291
	.nuke = i8xx_fbc_nuke,
292
	.program_cfb = i8xx_fbc_program_cfb,
293 294
};

295
static void i965_fbc_nuke(struct intel_fbc *fbc)
296
{
297
	struct intel_fbc_reg_params *params = &fbc->params;
298
	enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane;
299
	struct drm_i915_private *dev_priv = fbc->i915;
300 301 302 303 304 305 306 307 308 309 310 311 312

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
			  intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane)));
	spin_unlock_irq(&dev_priv->uncore.lock);
}

static const struct intel_fbc_funcs i965_fbc_funcs = {
	.activate = i8xx_fbc_activate,
	.deactivate = i8xx_fbc_deactivate,
	.is_active = i8xx_fbc_is_active,
	.is_compressing = i8xx_fbc_is_compressing,
	.nuke = i965_fbc_nuke,
313
	.program_cfb = i8xx_fbc_program_cfb,
314 315
};

316
static u32 g4x_dpfc_ctl_limit(struct intel_fbc *fbc)
317
{
318
	switch (fbc->limit) {
319
	default:
320
		MISSING_CASE(fbc->limit);
321 322 323 324 325 326 327 328 329 330
		fallthrough;
	case 1:
		return DPFC_CTL_LIMIT_1X;
	case 2:
		return DPFC_CTL_LIMIT_2X;
	case 4:
		return DPFC_CTL_LIMIT_4X;
	}
}

331
static u32 g4x_dpfc_ctl(struct intel_fbc *fbc)
332
{
333 334
	const struct intel_fbc_reg_params *params = &fbc->params;
	struct drm_i915_private *i915 = fbc->i915;
335 336
	u32 dpfc_ctl;

337
	dpfc_ctl = g4x_dpfc_ctl_limit(fbc) |
338
		DPFC_CTL_PLANE_G4X(params->crtc.i9xx_plane);
339

340
	if (IS_G4X(i915))
341
		dpfc_ctl |= DPFC_CTL_SR_EN;
342

343
	if (params->fence_id >= 0) {
344
		dpfc_ctl |= DPFC_CTL_FENCE_EN_G4X;
345 346

		if (DISPLAY_VER(i915) < 6)
347
			dpfc_ctl |= DPFC_CTL_FENCENO(params->fence_id);
348 349 350 351 352
	}

	return dpfc_ctl;
}

353
static void g4x_fbc_activate(struct intel_fbc *fbc)
354
{
355 356
	const struct intel_fbc_reg_params *params = &fbc->params;
	struct drm_i915_private *i915 = fbc->i915;
357

V
Ville Syrjälä 已提交
358
	intel_de_write(i915, DPFC_FENCE_YOFF,
359
		       params->fence_y_offset);
360

V
Ville Syrjälä 已提交
361
	intel_de_write(i915, DPFC_CONTROL,
362
		       DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
363 364
}

365
static void g4x_fbc_deactivate(struct intel_fbc *fbc)
366
{
367
	struct drm_i915_private *i915 = fbc->i915;
368 369 370
	u32 dpfc_ctl;

	/* Disable compression */
V
Ville Syrjälä 已提交
371
	dpfc_ctl = intel_de_read(i915, DPFC_CONTROL);
372 373
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
V
Ville Syrjälä 已提交
374
		intel_de_write(i915, DPFC_CONTROL, dpfc_ctl);
375 376 377
	}
}

378
static bool g4x_fbc_is_active(struct intel_fbc *fbc)
379
{
380
	return intel_de_read(fbc->i915, DPFC_CONTROL) & DPFC_CTL_EN;
381 382
}

383
static bool g4x_fbc_is_compressing(struct intel_fbc *fbc)
384
{
385
	return intel_de_read(fbc->i915, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
386 387
}

388
static void g4x_fbc_program_cfb(struct intel_fbc *fbc)
389
{
390
	struct drm_i915_private *i915 = fbc->i915;
391 392 393 394

	intel_de_write(i915, DPFC_CB_BASE, fbc->compressed_fb.start);
}

395 396 397 398 399
static const struct intel_fbc_funcs g4x_fbc_funcs = {
	.activate = g4x_fbc_activate,
	.deactivate = g4x_fbc_deactivate,
	.is_active = g4x_fbc_is_active,
	.is_compressing = g4x_fbc_is_compressing,
400
	.nuke = i965_fbc_nuke,
401
	.program_cfb = g4x_fbc_program_cfb,
402 403
};

404
static void ilk_fbc_activate(struct intel_fbc *fbc)
405
{
406 407
	struct intel_fbc_reg_params *params = &fbc->params;
	struct drm_i915_private *i915 = fbc->i915;
408

V
Ville Syrjälä 已提交
409
	intel_de_write(i915, ILK_DPFC_FENCE_YOFF,
410
		       params->fence_y_offset);
411

V
Ville Syrjälä 已提交
412
	intel_de_write(i915, ILK_DPFC_CONTROL,
413
		       DPFC_CTL_EN | g4x_dpfc_ctl(fbc));
414 415
}

416
static void ilk_fbc_deactivate(struct intel_fbc *fbc)
417
{
418
	struct drm_i915_private *i915 = fbc->i915;
419 420 421
	u32 dpfc_ctl;

	/* Disable compression */
V
Ville Syrjälä 已提交
422
	dpfc_ctl = intel_de_read(i915, ILK_DPFC_CONTROL);
423 424
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
V
Ville Syrjälä 已提交
425
		intel_de_write(i915, ILK_DPFC_CONTROL, dpfc_ctl);
426 427 428
	}
}

429
static bool ilk_fbc_is_active(struct intel_fbc *fbc)
430
{
431
	return intel_de_read(fbc->i915, ILK_DPFC_CONTROL) & DPFC_CTL_EN;
432 433
}

434
static bool ilk_fbc_is_compressing(struct intel_fbc *fbc)
435
{
436
	return intel_de_read(fbc->i915, ILK_DPFC_STATUS) & DPFC_COMP_SEG_MASK;
437 438
}

439
static void ilk_fbc_program_cfb(struct intel_fbc *fbc)
440
{
441
	struct drm_i915_private *i915 = fbc->i915;
442 443 444 445

	intel_de_write(i915, ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
}

446 447 448 449 450
static const struct intel_fbc_funcs ilk_fbc_funcs = {
	.activate = ilk_fbc_activate,
	.deactivate = ilk_fbc_deactivate,
	.is_active = ilk_fbc_is_active,
	.is_compressing = ilk_fbc_is_compressing,
451
	.nuke = i965_fbc_nuke,
452
	.program_cfb = ilk_fbc_program_cfb,
453 454
};

455
static void snb_fbc_program_fence(struct intel_fbc *fbc)
456
{
457 458
	const struct intel_fbc_reg_params *params = &fbc->params;
	struct drm_i915_private *i915 = fbc->i915;
459 460 461
	u32 ctl = 0;

	if (params->fence_id >= 0)
462
		ctl = SNB_DPFC_FENCE_EN | SNB_DPFC_FENCENO(params->fence_id);
463 464

	intel_de_write(i915, SNB_DPFC_CTL_SA, ctl);
465
	intel_de_write(i915, SNB_DPFC_CPU_FENCE_OFFSET, params->fence_y_offset);
466 467
}

468
static void snb_fbc_activate(struct intel_fbc *fbc)
469
{
470
	snb_fbc_program_fence(fbc);
471

472
	ilk_fbc_activate(fbc);
473 474
}

475
static void snb_fbc_nuke(struct intel_fbc *fbc)
476
{
477 478
	struct drm_i915_private *i915 = fbc->i915;

V
Ville Syrjälä 已提交
479 480
	intel_de_write(i915, MSG_FBC_REND_STATE, FBC_REND_NUKE);
	intel_de_posting_read(i915, MSG_FBC_REND_STATE);
481 482 483 484 485 486 487 488
}

static const struct intel_fbc_funcs snb_fbc_funcs = {
	.activate = snb_fbc_activate,
	.deactivate = ilk_fbc_deactivate,
	.is_active = ilk_fbc_is_active,
	.is_compressing = ilk_fbc_is_compressing,
	.nuke = snb_fbc_nuke,
489
	.program_cfb = ilk_fbc_program_cfb,
490 491
};

492
static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc)
493
{
494
	const struct intel_fbc_reg_params *params = &fbc->params;
495
	struct drm_i915_private *i915 = fbc->i915;
496
	u32 val = 0;
497

498 499 500
	if (params->override_cfb_stride)
		val |= FBC_STRIDE_OVERRIDE |
			FBC_STRIDE(params->override_cfb_stride / fbc->limit);
501

502 503
	intel_de_write(i915, GLK_FBC_STRIDE, val);
}
504

505
static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc)
506 507
{
	const struct intel_fbc_reg_params *params = &fbc->params;
508
	struct drm_i915_private *i915 = fbc->i915;
509
	u32 val = 0;
510

511 512 513 514
	/* Display WA #0529: skl, kbl, bxt. */
	if (params->override_cfb_stride)
		val |= CHICKEN_FBC_STRIDE_OVERRIDE |
			CHICKEN_FBC_STRIDE(params->override_cfb_stride / fbc->limit);
515

516 517 518 519 520
	intel_de_rmw(i915, CHICKEN_MISC_4,
		     CHICKEN_FBC_STRIDE_OVERRIDE |
		     CHICKEN_FBC_STRIDE_MASK, val);
}

521
static u32 ivb_dpfc_ctl(struct intel_fbc *fbc)
522
{
523 524
	const struct intel_fbc_reg_params *params = &fbc->params;
	struct drm_i915_private *i915 = fbc->i915;
525 526
	u32 dpfc_ctl;

527
	dpfc_ctl = g4x_dpfc_ctl_limit(fbc);
528

529
	if (IS_IVYBRIDGE(i915))
530
		dpfc_ctl |= DPFC_CTL_PLANE_IVB(params->crtc.i9xx_plane);
531

532
	if (params->fence_id >= 0)
533
		dpfc_ctl |= DPFC_CTL_FENCE_EN_IVB;
534

535
	if (fbc->false_color)
536
		dpfc_ctl |= DPFC_CTL_FALSE_COLOR;
537

538 539 540
	return dpfc_ctl;
}

541
static void ivb_fbc_activate(struct intel_fbc *fbc)
542
{
543 544
	struct drm_i915_private *i915 = fbc->i915;

V
Ville Syrjälä 已提交
545
	if (DISPLAY_VER(i915) >= 10)
546
		glk_fbc_program_cfb_stride(fbc);
V
Ville Syrjälä 已提交
547
	else if (DISPLAY_VER(i915) == 9)
548
		skl_fbc_program_cfb_stride(fbc);
549

V
Ville Syrjälä 已提交
550
	if (i915->ggtt.num_fences)
551
		snb_fbc_program_fence(fbc);
552

V
Ville Syrjälä 已提交
553
	intel_de_write(i915, ILK_DPFC_CONTROL,
554
		       DPFC_CTL_EN | ivb_dpfc_ctl(fbc));
555 556
}

557
static bool ivb_fbc_is_compressing(struct intel_fbc *fbc)
558
{
559
	return intel_de_read(fbc->i915, ILK_DPFC_STATUS2) & DPFC_COMP_SEG_MASK_IVB;
560 561
}

562
static void ivb_fbc_set_false_color(struct intel_fbc *fbc,
563 564
				    bool enable)
{
565
	intel_de_rmw(fbc->i915, ILK_DPFC_CONTROL,
566
		     DPFC_CTL_FALSE_COLOR, enable ? DPFC_CTL_FALSE_COLOR : 0);
567 568
}

V
Ville Syrjälä 已提交
569 570
static const struct intel_fbc_funcs ivb_fbc_funcs = {
	.activate = ivb_fbc_activate,
571 572
	.deactivate = ilk_fbc_deactivate,
	.is_active = ilk_fbc_is_active,
V
Ville Syrjälä 已提交
573
	.is_compressing = ivb_fbc_is_compressing,
574
	.nuke = snb_fbc_nuke,
575
	.program_cfb = ilk_fbc_program_cfb,
576
	.set_false_color = ivb_fbc_set_false_color,
577 578
};

579
static bool intel_fbc_hw_is_active(struct intel_fbc *fbc)
580
{
581
	return fbc->funcs->is_active(fbc);
582 583
}

584
static void intel_fbc_hw_activate(struct intel_fbc *fbc)
585
{
586 587
	trace_intel_fbc_activate(fbc->crtc);

588
	fbc->active = true;
589
	fbc->activated = true;
590

591
	fbc->funcs->activate(fbc);
592 593
}

594
static void intel_fbc_hw_deactivate(struct intel_fbc *fbc)
595
{
596 597
	trace_intel_fbc_deactivate(fbc->crtc);

598 599
	fbc->active = false;

600
	fbc->funcs->deactivate(fbc);
601 602
}

603
bool intel_fbc_is_compressing(struct intel_fbc *fbc)
604
{
605
	return fbc->funcs->is_compressing(fbc);
606 607
}

608
static void intel_fbc_nuke(struct intel_fbc *fbc)
609 610 611
{
	trace_intel_fbc_nuke(fbc->crtc);

612
	fbc->funcs->nuke(fbc);
613 614
}

615
int intel_fbc_set_false_color(struct intel_fbc *fbc, bool enable)
616 617 618 619 620 621 622 623
{
	if (!fbc->funcs || !fbc->funcs->set_false_color)
		return -ENODEV;

	mutex_lock(&fbc->lock);

	fbc->false_color = enable;

624
	fbc->funcs->set_false_color(fbc, enable);
625 626 627 628 629 630

	mutex_unlock(&fbc->lock);

	return 0;
}

R
Rodrigo Vivi 已提交
631
/**
632
 * intel_fbc_is_active - Is FBC active?
633
 * @fbc: The FBC instance
R
Rodrigo Vivi 已提交
634 635
 *
 * This function is used to verify the current state of FBC.
D
Daniel Vetter 已提交
636
 *
R
Rodrigo Vivi 已提交
637
 * FIXME: This should be tracked in the plane config eventually
D
Daniel Vetter 已提交
638
 * instead of queried at runtime for most callers.
R
Rodrigo Vivi 已提交
639
 */
640
bool intel_fbc_is_active(struct intel_fbc *fbc)
641
{
642
	return fbc->active;
643 644
}

645
static void intel_fbc_activate(struct intel_fbc *fbc)
646
{
647 648
	intel_fbc_hw_activate(fbc);
	intel_fbc_nuke(fbc);
649 650
}

651
static void intel_fbc_deactivate(struct intel_fbc *fbc, const char *reason)
P
Paulo Zanoni 已提交
652
{
653
	struct drm_i915_private *i915 = fbc->i915;
654

V
Ville Syrjälä 已提交
655
	drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock));
P
Paulo Zanoni 已提交
656

657
	if (fbc->active)
658
		intel_fbc_hw_deactivate(fbc);
659 660

	fbc->no_fbc_reason = reason;
661 662
}

663 664
static u64 intel_fbc_cfb_base_max(struct drm_i915_private *i915)
{
665
	if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915))
666 667 668 669 670
		return BIT_ULL(28);
	else
		return BIT_ULL(32);
}

V
Ville Syrjälä 已提交
671
static u64 intel_fbc_stolen_end(struct drm_i915_private *i915)
672
{
673 674 675 676 677 678
	u64 end;

	/* The FBC hardware for BDW/SKL doesn't have access to the stolen
	 * reserved range size, so it always assumes the maximum (8mb) is used.
	 * If we enable FBC using a CFB on that memory range we'll get FIFO
	 * underruns, even if that range is not reserved by the BIOS. */
V
Ville Syrjälä 已提交
679 680 681
	if (IS_BROADWELL(i915) ||
	    (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)))
		end = resource_size(&i915->dsm) - 8 * 1024 * 1024;
682
	else
683
		end = U64_MAX;
684

V
Ville Syrjälä 已提交
685
	return min(end, intel_fbc_cfb_base_max(i915));
686 687
}

688
static int intel_fbc_min_limit(const struct intel_plane_state *plane_state)
689
{
690 691
	int fb_cpp = plane_state->hw.fb ? plane_state->hw.fb->format->cpp[0] : 0;

692 693 694
	return fb_cpp == 2 ? 2 : 1;
}

V
Ville Syrjälä 已提交
695
static int intel_fbc_max_limit(struct drm_i915_private *i915)
696 697
{
	/* WaFbcOnly1to1Ratio:ctg */
V
Ville Syrjälä 已提交
698
	if (IS_G4X(i915))
699 700
		return 1;

701 702 703 704
	/*
	 * FBC2 can only do 1:1, 1:2, 1:4, we limit
	 * FBC1 to the same out of convenience.
	 */
705
	return 4;
706 707
}

708
static int find_compression_limit(struct intel_fbc *fbc,
709
				  unsigned int size, int min_limit)
710
{
711
	struct drm_i915_private *i915 = fbc->i915;
V
Ville Syrjälä 已提交
712
	u64 end = intel_fbc_stolen_end(i915);
713 714 715
	int ret, limit = min_limit;

	size /= limit;
716 717

	/* Try to over-allocate to reduce reallocations and fragmentation. */
V
Ville Syrjälä 已提交
718
	ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
719
						   size <<= 1, 4096, 0, end);
720
	if (ret == 0)
721
		return limit;
722

V
Ville Syrjälä 已提交
723 724
	for (; limit <= intel_fbc_max_limit(i915); limit <<= 1) {
		ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
725 726 727
							   size >>= 1, 4096, 0, end);
		if (ret == 0)
			return limit;
728
	}
729 730

	return 0;
731 732
}

733
static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
734
			       unsigned int size, int min_limit)
735
{
736
	struct drm_i915_private *i915 = fbc->i915;
737
	int ret;
738

V
Ville Syrjälä 已提交
739
	drm_WARN_ON(&i915->drm,
740
		    drm_mm_node_allocated(&fbc->compressed_fb));
V
Ville Syrjälä 已提交
741
	drm_WARN_ON(&i915->drm,
742
		    drm_mm_node_allocated(&fbc->compressed_llb));
743

V
Ville Syrjälä 已提交
744 745
	if (DISPLAY_VER(i915) < 5 && !IS_G4X(i915)) {
		ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb,
746 747 748 749 750
						  4096, 4096);
		if (ret)
			goto err;
	}

751
	ret = find_compression_limit(fbc, size, min_limit);
752 753
	if (!ret)
		goto err_llb;
754
	else if (ret > min_limit)
V
Ville Syrjälä 已提交
755
		drm_info_once(&i915->drm,
756
			      "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
757

758
	fbc->limit = ret;
759

V
Ville Syrjälä 已提交
760
	drm_dbg_kms(&i915->drm,
761 762
		    "reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n",
		    fbc->compressed_fb.size, fbc->limit);
763 764 765 766

	return 0;

err_llb:
767
	if (drm_mm_node_allocated(&fbc->compressed_llb))
V
Ville Syrjälä 已提交
768
		i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
769
err:
V
Ville Syrjälä 已提交
770 771
	if (drm_mm_initialized(&i915->mm.stolen))
		drm_info_once(&i915->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
772 773 774
	return -ENOSPC;
}

775
static void intel_fbc_program_cfb(struct intel_fbc *fbc)
776
{
777
	fbc->funcs->program_cfb(fbc);
778 779
}

780
static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
781
{
782
	struct drm_i915_private *i915 = fbc->i915;
783

784
	if (WARN_ON(intel_fbc_hw_is_active(fbc)))
785 786
		return;

787
	if (drm_mm_node_allocated(&fbc->compressed_llb))
V
Ville Syrjälä 已提交
788
		i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
789
	if (drm_mm_node_allocated(&fbc->compressed_fb))
V
Ville Syrjälä 已提交
790
		i915_gem_stolen_remove_node(i915, &fbc->compressed_fb);
791 792
}

793
void intel_fbc_cleanup(struct drm_i915_private *i915)
P
Paulo Zanoni 已提交
794
{
V
Ville Syrjälä 已提交
795
	struct intel_fbc *fbc = &i915->fbc;
796

V
Ville Syrjälä 已提交
797
	if (!HAS_FBC(i915))
798 799
		return;

800
	mutex_lock(&fbc->lock);
801
	__intel_fbc_cleanup_cfb(fbc);
802
	mutex_unlock(&fbc->lock);
P
Paulo Zanoni 已提交
803 804
}

805
static bool stride_is_valid(const struct intel_plane_state *plane_state)
806
{
807 808 809 810 811
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int stride = intel_fbc_plane_stride(plane_state) *
		fb->format->cpp[0];

812
	/* This should have been caught earlier. */
V
Ville Syrjälä 已提交
813
	if (drm_WARN_ON_ONCE(&i915->drm, (stride & (64 - 1)) != 0))
814
		return false;
815 816

	/* Below are the additional FBC restrictions. */
817 818
	if (stride < 512)
		return false;
819

V
Ville Syrjälä 已提交
820
	if (DISPLAY_VER(i915) == 2 || DISPLAY_VER(i915) == 3)
821 822
		return stride == 4096 || stride == 8192;

V
Ville Syrjälä 已提交
823
	if (DISPLAY_VER(i915) == 4 && !IS_G4X(i915) && stride < 2048)
824 825
		return false;

826
	/* Display WA #1105: skl,bxt,kbl,cfl,glk */
V
Ville Syrjälä 已提交
827
	if ((DISPLAY_VER(i915) == 9 || IS_GEMINILAKE(i915)) &&
828
	    fb->modifier == DRM_FORMAT_MOD_LINEAR && stride & 511)
829 830
		return false;

831 832 833 834 835 836
	if (stride > 16384)
		return false;

	return true;
}

837
static bool pixel_format_is_valid(const struct intel_plane_state *plane_state)
838
{
839 840 841 842
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	switch (fb->format->format) {
843 844 845 846 847 848
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
		return true;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_RGB565:
		/* 16bpp not supported on gen2 */
V
Ville Syrjälä 已提交
849
		if (DISPLAY_VER(i915) == 2)
850 851
			return false;
		/* WaFbcOnly1to1Ratio:ctg */
V
Ville Syrjälä 已提交
852
		if (IS_G4X(i915))
853 854 855 856 857 858 859
			return false;
		return true;
	default:
		return false;
	}
}

860
static bool rotation_is_valid(const struct intel_plane_state *plane_state)
861
{
862 863 864 865 866
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;

	if (DISPLAY_VER(i915) >= 9 && fb->format->format == DRM_FORMAT_RGB565 &&
867 868
	    drm_rotation_90_or_270(rotation))
		return false;
V
Ville Syrjälä 已提交
869
	else if (DISPLAY_VER(i915) <= 4 && !IS_G4X(i915) &&
870 871 872 873 874 875
		 rotation != DRM_MODE_ROTATE_0)
		return false;

	return true;
}

876 877 878
/*
 * For some reason, the hardware tracking starts looking at whatever we
 * programmed as the display plane base address register. It does not look at
879 880
 * the X and Y offset registers. That's why we include the src x/y offsets
 * instead of just looking at the plane size.
881
 */
882
static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *plane_state)
883
{
884
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
885
	unsigned int effective_w, effective_h, max_w, max_h;
886

V
Ville Syrjälä 已提交
887
	if (DISPLAY_VER(i915) >= 10) {
888 889
		max_w = 5120;
		max_h = 4096;
V
Ville Syrjälä 已提交
890
	} else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) {
891 892
		max_w = 4096;
		max_h = 4096;
V
Ville Syrjälä 已提交
893
	} else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) {
894 895 896 897 898 899 900
		max_w = 4096;
		max_h = 2048;
	} else {
		max_w = 2048;
		max_h = 1536;
	}

901 902 903 904
	effective_w = plane_state->view.color_plane[0].x +
		(drm_rect_width(&plane_state->uapi.src) >> 16);
	effective_h = plane_state->view.color_plane[0].y +
		(drm_rect_height(&plane_state->uapi.src) >> 16);
905 906

	return effective_w <= max_w && effective_h <= max_h;
907 908
}

909
static bool tiling_is_valid(const struct intel_plane_state *plane_state)
910
{
911 912 913 914
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;

	switch (fb->modifier) {
915 916
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_Y_TILED:
917
	case I915_FORMAT_MOD_Yf_TILED:
V
Ville Syrjälä 已提交
918
		return DISPLAY_VER(i915) >= 9;
919
	case I915_FORMAT_MOD_X_TILED:
920 921 922 923 924 925
		return true;
	default:
		return false;
	}
}

926 927 928
static void intel_fbc_update_state_cache(struct intel_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_plane *plane)
929
{
930 931 932 933 934 935
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	struct intel_fbc *fbc = plane->fbc;
936
	struct intel_fbc_state_cache *cache = &fbc->state_cache;
937

938 939
	cache->no_fbc_reason = plane_state->no_fbc_reason;
	if (cache->no_fbc_reason)
940
		return;
941

942 943
	/* FBC1 compression interval: arbitrary choice of 1 second */
	cache->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode);
944

945 946
	cache->fence_y_offset = intel_plane_fence_y_offset(plane_state);

V
Ville Syrjälä 已提交
947
	drm_WARN_ON(&i915->drm, plane_state->flags & PLANE_HAS_FENCE &&
948
		    !plane_state->ggtt_vma->fence);
949 950

	if (plane_state->flags & PLANE_HAS_FENCE &&
951 952
	    plane_state->ggtt_vma->fence)
		cache->fence_id = plane_state->ggtt_vma->fence->id;
953 954
	else
		cache->fence_id = -1;
955 956 957 958

	cache->cfb_stride = intel_fbc_cfb_stride(plane_state);
	cache->cfb_size = intel_fbc_cfb_size(plane_state);
	cache->override_cfb_stride = intel_fbc_override_cfb_stride(plane_state);
959 960
}

961
static bool intel_fbc_cfb_size_changed(struct intel_fbc *fbc)
962
{
963
	return fbc->state_cache.cfb_size > fbc->compressed_fb.size * fbc->limit;
964 965
}

966
static bool intel_fbc_can_enable(struct intel_fbc *fbc)
967
{
968
	struct drm_i915_private *i915 = fbc->i915;
969

V
Ville Syrjälä 已提交
970
	if (intel_vgpu_active(i915)) {
971 972 973 974
		fbc->no_fbc_reason = "VGPU is active";
		return false;
	}

V
Ville Syrjälä 已提交
975
	if (!i915->params.enable_fbc) {
976 977 978 979 980 981 982 983 984 985 986 987
		fbc->no_fbc_reason = "disabled per module param or by default";
		return false;
	}

	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "underrun detected";
		return false;
	}

	return true;
}

988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091
static int intel_fbc_check_plane(struct intel_atomic_state *state,
				 struct intel_plane *plane)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc);
	const struct intel_crtc_state *crtc_state;
	struct intel_fbc *fbc = plane->fbc;

	if (!fbc)
		return 0;

	if (!plane_state->uapi.visible) {
		plane_state->no_fbc_reason = "plane not visible";
		return 0;
	}

	crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		plane_state->no_fbc_reason = "interlaced mode not supported";
		return 0;
	}

	/*
	 * Display 12+ is not supporting FBC with PSR2.
	 * Recommendation is to keep this combination disabled
	 * Bspec: 50422 HSD: 14010260002
	 */
	if (DISPLAY_VER(i915) >= 12 && crtc_state->has_psr2) {
		plane_state->no_fbc_reason = "PSR2 enabled";
		return false;
	}

	if (!pixel_format_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "pixel format not supported";
		return 0;
	}

	if (!tiling_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "tiling not supported";
		return 0;
	}

	if (!rotation_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "rotation not supported";
		return 0;
	}

	if (!stride_is_valid(plane_state)) {
		plane_state->no_fbc_reason = "stride not supported";
		return 0;
	}

	if (plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
	    fb->format->has_alpha) {
		plane_state->no_fbc_reason = "per-pixel alpha not supported";
		return false;
	}

	if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
		plane_state->no_fbc_reason = "plane size too big";
		return 0;
	}

	/*
	 * Work around a problem on GEN9+ HW, where enabling FBC on a plane
	 * having a Y offset that isn't divisible by 4 causes FIFO underrun
	 * and screen flicker.
	 */
	if (DISPLAY_VER(i915) >= 9 &&
	    plane_state->view.color_plane[0].y & 3) {
		plane_state->no_fbc_reason = "plane start Y offset misaligned";
		return false;
	}

	/* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
	if (DISPLAY_VER(i915) >= 11 &&
	    (plane_state->view.color_plane[0].y + drm_rect_height(&plane_state->uapi.src)) & 3) {
		plane_state->no_fbc_reason = "plane end Y offset misaligned";
		return false;
	}

	/* WaFbcExceedCdClockThreshold:hsw,bdw */
	if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		const struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		if (crtc_state->pixel_rate >= cdclk_state->logical.cdclk * 95 / 100) {
			plane_state->no_fbc_reason = "pixel rate too high";
			return 0;
		}
	}

	plane_state->no_fbc_reason = NULL;

	return 0;
}

1092 1093
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
V
Ville Syrjälä 已提交
1094 1095
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &i915->fbc;
1096 1097
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

1098
	if (!intel_fbc_can_enable(fbc))
1099 1100
		return false;

1101 1102
	if (cache->no_fbc_reason) {
		fbc->no_fbc_reason = cache->no_fbc_reason;
1103 1104 1105
		return false;
	}

1106 1107 1108 1109 1110 1111 1112 1113
	/* We don't need to use a state cache here since this information is
	 * global for all CRTC.
	 */
	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "underrun detected";
		return false;
	}

1114 1115 1116 1117 1118 1119
	/* The use of a CPU fence is one of two ways to detect writes by the
	 * CPU to the scanout and trigger updates to the FBC.
	 *
	 * The other method is by software tracking (see
	 * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke
	 * the current compressed buffer and recompress it.
1120 1121
	 *
	 * Note that is possible for a tiled surface to be unmappable (and
1122
	 * so have no fence associated with it) due to aperture constraints
1123
	 * at the time of pinning.
1124 1125 1126 1127
	 *
	 * FIXME with 90/270 degree rotation we should use the fence on
	 * the normal GTT view (the rotated view doesn't even have a
	 * fence). Would need changes to the FBC fence Y offset as well.
1128
	 * For now this will effectively disable FBC with 90/270 degree
1129
	 * rotation.
1130
	 */
V
Ville Syrjälä 已提交
1131
	if (DISPLAY_VER(i915) < 9 && cache->fence_id < 0) {
1132 1133
		fbc->no_fbc_reason = "framebuffer not tiled or fenced";
		return false;
1134
	}
1135

1136 1137
	/*
	 * It is possible for the required CFB size change without a
1138 1139 1140 1141 1142 1143 1144 1145
	 * crtc->disable + crtc->enable since it is possible to change the
	 * stride without triggering a full modeset. Since we try to
	 * over-allocate the CFB, there's a chance we may keep FBC enabled even
	 * if this happens, but if we exceed the current CFB size we'll have to
	 * disable FBC. Notice that it would be possible to disable FBC, wait
	 * for a frame, free the stolen node, then try to reenable FBC in case
	 * we didn't get any invalidate/deactivate calls, but this would require
	 * a lot of tracking just for a specific case. If we conclude it's an
1146 1147
	 * important case, we can implement it later.
	 */
1148
	if (intel_fbc_cfb_size_changed(fbc)) {
1149
		fbc->no_fbc_reason = "CFB requirements changed";
1150 1151 1152 1153 1154 1155
		return false;
	}

	return true;
}

1156 1157
static void intel_fbc_get_reg_params(struct intel_fbc *fbc,
				     struct intel_crtc *crtc)
1158
{
1159 1160
	const struct intel_fbc_state_cache *cache = &fbc->state_cache;
	struct intel_fbc_reg_params *params = &fbc->params;
1161 1162 1163 1164 1165 1166

	/* Since all our fields are integer types, use memset here so the
	 * comparison function can rely on memcmp because the padding will be
	 * zero. */
	memset(params, 0, sizeof(*params));

1167
	params->fence_id = cache->fence_id;
1168
	params->fence_y_offset = cache->fence_y_offset;
1169

1170
	params->interval = cache->interval;
V
Ville Syrjälä 已提交
1171
	params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
1172

1173 1174 1175
	params->cfb_stride = cache->cfb_stride;
	params->cfb_size = cache->cfb_size;
	params->override_cfb_stride = cache->override_cfb_stride;
1176 1177
}

1178 1179 1180
static bool intel_fbc_can_flip_nuke(struct intel_atomic_state *state,
				    struct intel_crtc *crtc,
				    struct intel_plane *plane)
1181
{
1182 1183 1184 1185 1186 1187 1188 1189 1190
	struct intel_fbc *fbc = plane->fbc;
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *old_plane_state =
		intel_atomic_get_old_plane_state(state, plane);
	const struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	const struct drm_framebuffer *old_fb = old_plane_state->hw.fb;
	const struct drm_framebuffer *new_fb = new_plane_state->hw.fb;
1191 1192 1193
	const struct intel_fbc_state_cache *cache = &fbc->state_cache;
	const struct intel_fbc_reg_params *params = &fbc->params;

1194
	if (drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi))
1195 1196 1197 1198 1199
		return false;

	if (!intel_fbc_can_activate(crtc))
		return false;

1200 1201 1202 1203
	if (!old_fb || !new_fb)
		return false;

	if (old_fb->format->format != new_fb->format->format)
1204 1205
		return false;

1206
	if (old_fb->modifier != new_fb->modifier)
1207 1208
		return false;

1209 1210
	if (intel_fbc_plane_stride(old_plane_state) !=
	    intel_fbc_plane_stride(new_plane_state))
1211 1212
		return false;

1213
	if (params->cfb_stride != cache->cfb_stride)
1214 1215
		return false;

1216
	if (params->cfb_size != cache->cfb_size)
1217 1218
		return false;

1219
	if (params->override_cfb_stride != cache->override_cfb_stride)
1220 1221 1222 1223 1224
		return false;

	return true;
}

1225 1226
bool intel_fbc_pre_update(struct intel_atomic_state *state,
			  struct intel_crtc *crtc)
1227
{
1228 1229 1230
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	const struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
V
Ville Syrjälä 已提交
1231
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1232
	struct intel_fbc *fbc = plane->fbc;
1233
	const char *reason = "update pending";
1234
	bool need_vblank_wait = false;
1235

1236
	if (!fbc || !plane_state)
1237 1238
		return need_vblank_wait;

1239
	mutex_lock(&fbc->lock);
1240

V
Ville Syrjälä 已提交
1241
	if (fbc->crtc != crtc)
1242
		goto unlock;
1243

1244
	intel_fbc_update_state_cache(state, crtc, plane);
1245
	fbc->flip_pending = true;
1246

1247
	if (!intel_fbc_can_flip_nuke(state, crtc, plane)) {
1248
		intel_fbc_deactivate(fbc, reason);
1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263

		/*
		 * Display WA #1198: glk+
		 * Need an extra vblank wait between FBC disable and most plane
		 * updates. Bspec says this is only needed for plane disable, but
		 * that is not true. Touching most plane registers will cause the
		 * corruption to appear. Also SKL/derivatives do not seem to be
		 * affected.
		 *
		 * TODO: could optimize this a bit by sampling the frame
		 * counter when we disable FBC (if it was already done earlier)
		 * and skipping the extra vblank wait before the plane update
		 * if at least one frame has already passed.
		 */
		if (fbc->activated &&
V
Ville Syrjälä 已提交
1264
		    DISPLAY_VER(i915) >= 10)
1265 1266 1267
			need_vblank_wait = true;
		fbc->activated = false;
	}
1268 1269
unlock:
	mutex_unlock(&fbc->lock);
1270 1271

	return need_vblank_wait;
1272 1273
}

1274
static void __intel_fbc_disable(struct intel_fbc *fbc)
1275
{
1276
	struct drm_i915_private *i915 = fbc->i915;
1277 1278
	struct intel_crtc *crtc = fbc->crtc;

V
Ville Syrjälä 已提交
1279 1280 1281
	drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock));
	drm_WARN_ON(&i915->drm, !fbc->crtc);
	drm_WARN_ON(&i915->drm, fbc->active);
1282

V
Ville Syrjälä 已提交
1283
	drm_dbg_kms(&i915->drm, "Disabling FBC on pipe %c\n",
1284
		    pipe_name(crtc->pipe));
1285

1286
	__intel_fbc_cleanup_cfb(fbc);
1287 1288 1289 1290

	fbc->crtc = NULL;
}

1291
static void __intel_fbc_post_update(struct intel_crtc *crtc)
1292
{
V
Ville Syrjälä 已提交
1293 1294
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &i915->fbc;
1295

V
Ville Syrjälä 已提交
1296
	drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock));
1297

V
Ville Syrjälä 已提交
1298
	if (fbc->crtc != crtc)
1299 1300
		return;

1301 1302
	fbc->flip_pending = false;

V
Ville Syrjälä 已提交
1303
	if (!i915->params.enable_fbc) {
1304 1305
		intel_fbc_deactivate(fbc, "disabled at runtime per module param");
		__intel_fbc_disable(fbc);
1306 1307 1308 1309

		return;
	}

1310
	intel_fbc_get_reg_params(fbc, crtc);
1311

1312
	if (!intel_fbc_can_activate(crtc))
1313 1314
		return;

1315
	if (!fbc->busy_bits)
1316
		intel_fbc_activate(fbc);
1317
	else
1318
		intel_fbc_deactivate(fbc, "frontbuffer write");
P
Paulo Zanoni 已提交
1319 1320
}

1321 1322
void intel_fbc_post_update(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
P
Paulo Zanoni 已提交
1323
{
1324 1325 1326
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	const struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
1327
	struct intel_fbc *fbc = plane->fbc;
1328

1329
	if (!fbc || !plane_state)
1330 1331
		return;

1332
	mutex_lock(&fbc->lock);
1333
	__intel_fbc_post_update(crtc);
1334
	mutex_unlock(&fbc->lock);
1335 1336
}

1337 1338
static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
{
V
Ville Syrjälä 已提交
1339
	if (fbc->crtc)
1340 1341 1342 1343 1344
		return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
	else
		return fbc->possible_framebuffer_bits;
}

V
Ville Syrjälä 已提交
1345
void intel_fbc_invalidate(struct drm_i915_private *i915,
1346 1347 1348
			  unsigned int frontbuffer_bits,
			  enum fb_op_origin origin)
{
V
Ville Syrjälä 已提交
1349
	struct intel_fbc *fbc = &i915->fbc;
1350

V
Ville Syrjälä 已提交
1351
	if (!HAS_FBC(i915))
1352 1353
		return;

1354
	if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
1355 1356
		return;

1357
	mutex_lock(&fbc->lock);
P
Paulo Zanoni 已提交
1358

1359
	fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;
1360

V
Ville Syrjälä 已提交
1361
	if (fbc->crtc && fbc->busy_bits)
1362
		intel_fbc_deactivate(fbc, "frontbuffer write");
P
Paulo Zanoni 已提交
1363

1364
	mutex_unlock(&fbc->lock);
1365 1366
}

V
Ville Syrjälä 已提交
1367
void intel_fbc_flush(struct drm_i915_private *i915,
1368
		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
1369
{
V
Ville Syrjälä 已提交
1370
	struct intel_fbc *fbc = &i915->fbc;
1371

V
Ville Syrjälä 已提交
1372
	if (!HAS_FBC(i915))
1373 1374
		return;

1375
	mutex_lock(&fbc->lock);
1376

1377
	fbc->busy_bits &= ~frontbuffer_bits;
1378

1379
	if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
1380 1381
		goto out;

V
Ville Syrjälä 已提交
1382
	if (!fbc->busy_bits && fbc->crtc &&
1383
	    (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
1384
		if (fbc->active)
1385
			intel_fbc_nuke(fbc);
1386
		else if (!fbc->flip_pending)
1387
			__intel_fbc_post_update(fbc->crtc);
1388
	}
P
Paulo Zanoni 已提交
1389

1390
out:
1391
	mutex_unlock(&fbc->lock);
1392 1393
}

1394
int intel_fbc_atomic_check(struct intel_atomic_state *state)
1395
{
1396
	struct intel_plane_state *plane_state;
1397
	struct intel_plane *plane;
1398
	int i;
1399

1400
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1401
		int ret;
1402

1403 1404 1405
		ret = intel_fbc_check_plane(state, plane);
		if (ret)
			return ret;
1406 1407
	}

1408
	return 0;
1409 1410
}

1411 1412 1413
/**
 * intel_fbc_enable: tries to enable FBC on the CRTC
 * @crtc: the CRTC
1414
 * @state: corresponding &drm_crtc_state for @crtc
1415
 *
1416
 * This function checks if the given CRTC was chosen for FBC, then enables it if
1417 1418 1419
 * possible. Notice that it doesn't activate FBC. It is valid to call
 * intel_fbc_enable multiple times for the same pipe without an
 * intel_fbc_disable in the middle, as long as it is deactivated.
1420
 */
1421 1422
static void intel_fbc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
1423
{
V
Ville Syrjälä 已提交
1424
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1425 1426 1427
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	const struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
1428 1429
	struct intel_fbc *fbc = plane->fbc;
	struct intel_fbc_state_cache *cache;
1430
	int min_limit;
1431

1432
	if (!fbc || !plane_state)
1433 1434
		return;

1435 1436
	cache = &fbc->state_cache;

1437
	min_limit = intel_fbc_min_limit(plane_state);
1438

1439
	mutex_lock(&fbc->lock);
1440

V
Ville Syrjälä 已提交
1441
	if (fbc->crtc) {
1442 1443 1444 1445
		if (fbc->crtc != crtc)
			goto out;

		if (fbc->limit >= min_limit &&
1446
		    !intel_fbc_cfb_size_changed(fbc))
1447
			goto out;
1448

1449
		__intel_fbc_disable(fbc);
1450
	}
1451

V
Ville Syrjälä 已提交
1452
	drm_WARN_ON(&i915->drm, fbc->active);
1453

1454
	intel_fbc_update_state_cache(state, crtc, plane);
1455

1456
	if (cache->no_fbc_reason)
1457 1458
		goto out;

1459
	if (intel_fbc_alloc_cfb(fbc, intel_fbc_cfb_size(plane_state), min_limit)) {
1460
		fbc->no_fbc_reason = "not enough stolen memory";
1461 1462 1463
		goto out;
	}

V
Ville Syrjälä 已提交
1464
	drm_dbg_kms(&i915->drm, "Enabling FBC on pipe %c\n",
1465
		    pipe_name(crtc->pipe));
1466
	fbc->no_fbc_reason = "FBC enabled but not active yet\n";
1467

1468
	fbc->crtc = crtc;
1469

1470
	intel_fbc_program_cfb(fbc);
1471
out:
1472
	mutex_unlock(&fbc->lock);
1473 1474 1475
}

/**
1476
 * intel_fbc_disable - disable FBC if it's associated with crtc
1477 1478 1479 1480
 * @crtc: the CRTC
 *
 * This function disables FBC if it's associated with the provided CRTC.
 */
1481
void intel_fbc_disable(struct intel_crtc *crtc)
1482
{
1483
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
1484
	struct intel_fbc *fbc = plane->fbc;
1485

1486
	if (!fbc)
1487 1488
		return;

1489
	mutex_lock(&fbc->lock);
1490
	if (fbc->crtc == crtc)
1491
		__intel_fbc_disable(fbc);
1492
	mutex_unlock(&fbc->lock);
1493 1494
}

1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507
/**
 * intel_fbc_update: enable/disable FBC on the CRTC
 * @state: atomic state
 * @crtc: the CRTC
 *
 * This function checks if the given CRTC was chosen for FBC, then enables it if
 * possible. Notice that it doesn't activate FBC. It is valid to call
 * intel_fbc_update multiple times for the same pipe without an
 * intel_fbc_disable in the middle.
 */
void intel_fbc_update(struct intel_atomic_state *state,
		      struct intel_crtc *crtc)
{
1508
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
1509 1510
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
1511 1512 1513 1514 1515 1516
	const struct intel_plane_state *plane_state =
		intel_atomic_get_new_plane_state(state, plane);
	struct intel_fbc *fbc = plane->fbc;

	if (!fbc || !plane_state)
		return;
1517

1518
	if (crtc_state->update_pipe && plane_state->no_fbc_reason)
1519 1520 1521 1522 1523
		intel_fbc_disable(crtc);
	else
		intel_fbc_enable(state, crtc);
}

1524
/**
1525
 * intel_fbc_global_disable - globally disable FBC
V
Ville Syrjälä 已提交
1526
 * @i915: i915 device instance
1527 1528 1529
 *
 * This function disables FBC regardless of which CRTC is associated with it.
 */
V
Ville Syrjälä 已提交
1530
void intel_fbc_global_disable(struct drm_i915_private *i915)
1531
{
V
Ville Syrjälä 已提交
1532
	struct intel_fbc *fbc = &i915->fbc;
1533

V
Ville Syrjälä 已提交
1534
	if (!HAS_FBC(i915))
1535 1536
		return;

1537
	mutex_lock(&fbc->lock);
V
Ville Syrjälä 已提交
1538
	if (fbc->crtc) {
V
Ville Syrjälä 已提交
1539
		drm_WARN_ON(&i915->drm, fbc->crtc->active);
1540
		__intel_fbc_disable(fbc);
1541
	}
1542
	mutex_unlock(&fbc->lock);
1543 1544
}

1545 1546
static void intel_fbc_underrun_work_fn(struct work_struct *work)
{
V
Ville Syrjälä 已提交
1547
	struct drm_i915_private *i915 =
1548
		container_of(work, struct drm_i915_private, fbc.underrun_work);
V
Ville Syrjälä 已提交
1549
	struct intel_fbc *fbc = &i915->fbc;
1550 1551 1552 1553

	mutex_lock(&fbc->lock);

	/* Maybe we were scheduled twice. */
V
Ville Syrjälä 已提交
1554
	if (fbc->underrun_detected || !fbc->crtc)
1555 1556
		goto out;

V
Ville Syrjälä 已提交
1557
	drm_dbg_kms(&i915->drm, "Disabling FBC due to FIFO underrun.\n");
1558 1559
	fbc->underrun_detected = true;

1560
	intel_fbc_deactivate(fbc, "FIFO underrun");
1561 1562 1563 1564
out:
	mutex_unlock(&fbc->lock);
}

1565 1566
/*
 * intel_fbc_reset_underrun - reset FBC fifo underrun status.
1567
 * @fbc: The FBC instance
1568 1569 1570 1571
 *
 * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
 * want to re-enable FBC after an underrun to increase test coverage.
 */
1572
int intel_fbc_reset_underrun(struct intel_fbc *fbc)
1573
{
1574
	struct drm_i915_private *i915 = fbc->i915;
1575 1576
	int ret;

1577
	cancel_work_sync(&fbc->underrun_work);
1578

1579
	ret = mutex_lock_interruptible(&fbc->lock);
1580 1581 1582
	if (ret)
		return ret;

1583
	if (fbc->underrun_detected) {
V
Ville Syrjälä 已提交
1584
		drm_dbg_kms(&i915->drm,
1585
			    "Re-allowing FBC after fifo underrun\n");
1586
		fbc->no_fbc_reason = "FIFO underrun cleared";
1587 1588
	}

1589 1590
	fbc->underrun_detected = false;
	mutex_unlock(&fbc->lock);
1591 1592 1593 1594

	return 0;
}

1595 1596
/**
 * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
1597
 * @fbc: The FBC instance
1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608
 *
 * Without FBC, most underruns are harmless and don't really cause too many
 * problems, except for an annoying message on dmesg. With FBC, underruns can
 * become black screens or even worse, especially when paired with bad
 * watermarks. So in order for us to be on the safe side, completely disable FBC
 * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
 * already suggests that watermarks may be bad, so try to be as safe as
 * possible.
 *
 * This function is called from the IRQ handler.
 */
1609
void intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
1610
{
1611
	if (!HAS_FBC(fbc->i915))
1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625
		return;

	/* There's no guarantee that underrun_detected won't be set to true
	 * right after this check and before the work is scheduled, but that's
	 * not a problem since we'll check it again under the work function
	 * while FBC is locked. This check here is just to prevent us from
	 * unnecessarily scheduling the work, and it relies on the fact that we
	 * never switch underrun_detect back to false after it's true. */
	if (READ_ONCE(fbc->underrun_detected))
		return;

	schedule_work(&fbc->underrun_work);
}

1626 1627 1628 1629 1630 1631 1632 1633 1634
/*
 * The DDX driver changes its behavior depending on the value it reads from
 * i915.enable_fbc, so sanitize it by translating the default value into either
 * 0 or 1 in order to allow it to know what's going on.
 *
 * Notice that this is done at driver initialization and we still allow user
 * space to change the value during runtime without sanitizing it again. IGT
 * relies on being able to change i915.enable_fbc at runtime.
 */
V
Ville Syrjälä 已提交
1635
static int intel_sanitize_fbc_option(struct drm_i915_private *i915)
1636
{
V
Ville Syrjälä 已提交
1637 1638
	if (i915->params.enable_fbc >= 0)
		return !!i915->params.enable_fbc;
1639

V
Ville Syrjälä 已提交
1640
	if (!HAS_FBC(i915))
1641 1642
		return 0;

V
Ville Syrjälä 已提交
1643
	if (IS_BROADWELL(i915) || DISPLAY_VER(i915) >= 9)
1644 1645 1646 1647 1648
		return 1;

	return 0;
}

V
Ville Syrjälä 已提交
1649
static bool need_fbc_vtd_wa(struct drm_i915_private *i915)
1650 1651
{
	/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
1652
	if (intel_vtd_active() &&
V
Ville Syrjälä 已提交
1653 1654
	    (IS_SKYLAKE(i915) || IS_BROXTON(i915))) {
		drm_info(&i915->drm,
1655
			 "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
1656 1657 1658 1659 1660 1661
		return true;
	}

	return false;
}

R
Rodrigo Vivi 已提交
1662 1663
/**
 * intel_fbc_init - Initialize FBC
V
Ville Syrjälä 已提交
1664
 * @i915: the i915 device
R
Rodrigo Vivi 已提交
1665 1666 1667
 *
 * This function might be called during PM init process.
 */
V
Ville Syrjälä 已提交
1668
void intel_fbc_init(struct drm_i915_private *i915)
1669
{
V
Ville Syrjälä 已提交
1670
	struct intel_fbc *fbc = &i915->fbc;
1671

1672
	fbc->i915 = i915;
1673
	INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
1674 1675
	mutex_init(&fbc->lock);
	fbc->active = false;
P
Paulo Zanoni 已提交
1676

V
Ville Syrjälä 已提交
1677 1678
	if (!drm_mm_initialized(&i915->mm.stolen))
		mkwrite_device_info(i915)->display.has_fbc = false;
1679

V
Ville Syrjälä 已提交
1680 1681
	if (need_fbc_vtd_wa(i915))
		mkwrite_device_info(i915)->display.has_fbc = false;
1682

V
Ville Syrjälä 已提交
1683 1684 1685
	i915->params.enable_fbc = intel_sanitize_fbc_option(i915);
	drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n",
		    i915->params.enable_fbc);
1686

V
Ville Syrjälä 已提交
1687
	if (!HAS_FBC(i915)) {
1688
		fbc->no_fbc_reason = "unsupported by this chipset";
1689 1690 1691
		return;
	}

V
Ville Syrjälä 已提交
1692
	if (DISPLAY_VER(i915) >= 7)
V
Ville Syrjälä 已提交
1693
		fbc->funcs = &ivb_fbc_funcs;
V
Ville Syrjälä 已提交
1694
	else if (DISPLAY_VER(i915) == 6)
1695
		fbc->funcs = &snb_fbc_funcs;
V
Ville Syrjälä 已提交
1696
	else if (DISPLAY_VER(i915) == 5)
1697
		fbc->funcs = &ilk_fbc_funcs;
V
Ville Syrjälä 已提交
1698
	else if (IS_G4X(i915))
1699
		fbc->funcs = &g4x_fbc_funcs;
V
Ville Syrjälä 已提交
1700
	else if (DISPLAY_VER(i915) == 4)
1701
		fbc->funcs = &i965_fbc_funcs;
1702 1703 1704
	else
		fbc->funcs = &i8xx_fbc_funcs;

1705
	/* We still don't have any sort of hardware state readout for FBC, so
1706 1707
	 * deactivate it in case the BIOS activated it to make sure software
	 * matches the hardware state. */
1708 1709
	if (intel_fbc_hw_is_active(fbc))
		intel_fbc_hw_deactivate(fbc);
1710
}