/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Frame Buffer Compression (FBC)
 *
 * FBC tries to save memory bandwidth (and so power consumption) by
 * compressing the amount of memory used by the display. It is totally
 * transparent to user space and completely handled in the kernel.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns. They come from keeping the memory footprint small
 * and having fewer memory pages opened and accessed for refreshing the display.
 *
 * i915 is responsible for reserving stolen memory for FBC and for configuring
 * its offset in the proper registers. The hardware takes care of all the
 * compression/decompression. However, there are many known cases where we
 * have to forcibly disable it to allow proper screen updates.
 */

#include "intel_drv.h"
#include "i915_drv.h"

/*
 * On some platforms, the CRTC's x:0/y:0 coordinates don't match the
 * frontbuffer's x:0/y:0 coordinates, so we lie to the hardware about the
 * plane's origin in order for the x and y offsets to actually fit the
 * registers. As a consequence, the fence doesn't really start exactly at the
 * display plane address we program, because it starts at the real start of
 * the buffer, so we have to take this into consideration here.
 */
static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
{
	return crtc->base.y - crtc->adjusted_y;
}

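/* Turn off the legacy FBC_CONTROL-based compressor and wait for it to go idle. */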
static void i8xx_fbc_disable(struct drm_i915_private *dev_priv)
{
	u32 fbc_ctl;

	dev_priv->fbc.enabled = false;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}

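/*
 * Program the CFB pitch, clear stale compression tags and enable compression
 * through FBC_CONTROL on the legacy (pre-GM45) FBC hardware.
 */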
static void i8xx_fbc_enable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_framebuffer *fb = crtc->base.primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	dev_priv->fbc.enabled = true;

	/* Note: fbc.threshold == 1 for i8xx */
	cfb_pitch = dev_priv->fbc.uncompressed_size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN2(dev_priv))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG(i), 0);

	if (IS_GEN4(dev_priv)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, get_crtc_fence_y_offset(crtc));
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev_priv))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
		      cfb_pitch, crtc->base.y, plane_name(crtc->plane));
}

static bool i8xx_fbc_enabled(struct drm_i915_private *dev_priv)
{
	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

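/*
 * Enable DPFC-based compression on GM45: program the plane, compression
 * limit, fence and fence Y offset, then set DPFC_CTL_EN.
 */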
static void g4x_fbc_enable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_framebuffer *fb = crtc->base.primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 dpfc_ctl;

	dev_priv->fbc.enabled = true;

	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;

	I915_WRITE(DPFC_FENCE_YOFF, get_crtc_fence_y_offset(crtc));

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}

static void g4x_fbc_disable(struct drm_i915_private *dev_priv)
{
	u32 dpfc_ctl;

	dev_priv->fbc.enabled = false;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool g4x_fbc_enabled(struct drm_i915_private *dev_priv)
{
	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

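/*
 * Ask the FBC hardware to recompress the frontbuffer by writing the "nuke"
 * command to the render message register.
 */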
static void intel_fbc_nuke(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
	POSTING_READ(MSG_FBC_REND_STATE);
}

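/*
 * Enable FBC on ILK/SNB: derive the compression limit from the threshold,
 * program the fence and render target base, then nuke so the current
 * frontbuffer contents get compressed.
 */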
static void ilk_fbc_enable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_framebuffer *fb = crtc->base.primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;
	unsigned int y_offset;

	dev_priv->fbc.enabled = true;

	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	if (IS_GEN5(dev_priv))
		dpfc_ctl |= obj->fence_reg;

	y_offset = get_crtc_fence_y_offset(crtc);
	I915_WRITE(ILK_DPFC_FENCE_YOFF, y_offset);
	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev_priv)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, y_offset);
	}

	intel_fbc_nuke(dev_priv);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}

static void ilk_fbc_disable(struct drm_i915_private *dev_priv)
{
	u32 dpfc_ctl;

	dev_priv->fbc.enabled = false;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool ilk_fbc_enabled(struct drm_i915_private *dev_priv)
{
	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

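/*
 * Enable FBC on IVB and newer: program the compression limit, apply the FBC
 * queue workarounds, set up the CPU fence and nuke to trigger compression.
 */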
static void gen7_fbc_enable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_framebuffer *fb = crtc->base.primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dev_priv->fbc.enabled = true;

	dpfc_ctl = 0;
	if (IS_IVYBRIDGE(dev_priv))
		dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane);

	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	if (IS_IVYBRIDGE(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
			   I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc));

	intel_fbc_nuke(dev_priv);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}

/**
 * intel_fbc_enabled - Is FBC enabled?
 * @dev_priv: i915 device instance
 *
 * This function is used to verify the current state of FBC.
 * FIXME: This should be tracked in the plane config eventually
 *        instead of queried at runtime for most callers.
 */
bool intel_fbc_enabled(struct drm_i915_private *dev_priv)
{
	return dev_priv->fbc.enabled;
}

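/* Call the platform enable hook and remember which crtc/fb/y FBC was enabled for. */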
static void intel_fbc_enable(struct intel_crtc *crtc,
			     const struct drm_framebuffer *fb)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	dev_priv->fbc.enable_fbc(crtc);

	dev_priv->fbc.crtc = crtc;
	dev_priv->fbc.fb_id = fb->base.id;
	dev_priv->fbc.y = crtc->base.y;
}

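/*
 * Deferred work that performs the actual enable scheduled by
 * intel_fbc_schedule_enable(), unless it was cancelled or the framebuffer
 * changed in the meantime.
 */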
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_i915_private *dev_priv = work->crtc->base.dev->dev_private;
	struct drm_framebuffer *crtc_fb = work->crtc->base.primary->fb;

	mutex_lock(&dev_priv->fbc.lock);
	if (work == dev_priv->fbc.fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (crtc_fb == work->fb)
			intel_fbc_enable(work->crtc, work->fb);

		dev_priv->fbc.fbc_work = NULL;
	}
	mutex_unlock(&dev_priv->fbc.lock);

	kfree(work);
}

static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
{
	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));

	if (dev_priv->fbc.fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc.fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc.fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake up (because the work was already running and waiting
	 * for our mutex), it will discover that it is no longer
	 * necessary to run.
	 */
	dev_priv->fbc.fbc_work = NULL;
}

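/* Schedule a delayed work item that will enable FBC on @crtc (see the comment below). */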
static void intel_fbc_schedule_enable(struct intel_crtc *crtc)
{
	struct intel_fbc_work *work;
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));

	intel_fbc_cancel_work(dev_priv);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL) {
		DRM_ERROR("Failed to allocate FBC work structure\n");
		intel_fbc_enable(crtc, crtc->base.primary->fb);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->base.primary->fb;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc.fbc_work = work;

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 *
	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}

static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
{
	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));

	intel_fbc_cancel_work(dev_priv);

	dev_priv->fbc.disable_fbc(dev_priv);
	dev_priv->fbc.crtc = NULL;
}

/**
 * intel_fbc_disable - disable FBC
 * @dev_priv: i915 device instance
 *
 * This function disables FBC.
 */
void intel_fbc_disable(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->fbc.enable_fbc)
		return;

	mutex_lock(&dev_priv->fbc.lock);
	__intel_fbc_disable(dev_priv);
	mutex_unlock(&dev_priv->fbc.lock);
}

/*
 * intel_fbc_disable_crtc - disable FBC if it's associated with crtc
 * @crtc: the CRTC
 *
 * This function disables FBC if it's associated with the provided CRTC.
 */
void intel_fbc_disable_crtc(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	if (!dev_priv->fbc.enable_fbc)
		return;

	mutex_lock(&dev_priv->fbc.lock);
	if (dev_priv->fbc.crtc == crtc)
		__intel_fbc_disable(dev_priv);
	mutex_unlock(&dev_priv->fbc.lock);
}

const char *intel_no_fbc_reason_str(enum no_fbc_reason reason)
{
	switch (reason) {
	case FBC_OK:
		return "FBC enabled but currently disabled in hardware";
	case FBC_UNSUPPORTED:
		return "unsupported by this chipset";
	case FBC_NO_OUTPUT:
		return "no output";
	case FBC_STOLEN_TOO_SMALL:
		return "not enough stolen memory";
	case FBC_UNSUPPORTED_MODE:
		return "mode incompatible with compression";
	case FBC_MODE_TOO_LARGE:
		return "mode too large for compression";
	case FBC_BAD_PLANE:
		return "FBC unsupported on plane";
	case FBC_NOT_TILED:
		return "framebuffer not tiled or fenced";
	case FBC_MULTIPLE_PIPES:
		return "more than one pipe active";
	case FBC_MODULE_PARAM:
		return "disabled per module param";
	case FBC_CHIP_DEFAULT:
		return "disabled per chip default";
	case FBC_ROTATION:
		return "rotation unsupported";
	case FBC_IN_DBG_MASTER:
		return "Kernel debugger is active";
	case FBC_BAD_STRIDE:
		return "framebuffer stride not supported";
	case FBC_PIXEL_RATE:
		return "pixel rate is too big";
	case FBC_PIXEL_FORMAT:
		return "pixel format is invalid";
	default:
		MISSING_CASE(reason);
		return "unknown reason";
	}
}

static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
			      enum no_fbc_reason reason)
{
	if (dev_priv->fbc.no_fbc_reason == reason)
		return;

	dev_priv->fbc.no_fbc_reason = reason;
	DRM_DEBUG_KMS("Disabling FBC: %s\n", intel_no_fbc_reason_str(reason));
}

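/*
 * Find a CRTC that FBC could be enabled on: an active pipe with a visible
 * primary plane and a framebuffer. On HSW and gen8+ only pipe A qualifies.
 */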
static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	enum pipe pipe;
	bool pipe_a_only = false;

	if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
		pipe_a_only = true;

	for_each_pipe(dev_priv, pipe) {
		tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];

		if (intel_crtc_active(tmp_crtc) &&
		    to_intel_plane_state(tmp_crtc->primary->state)->visible)
			crtc = tmp_crtc;

		if (pipe_a_only)
			break;
	}

	if (!crtc || crtc->primary->fb == NULL)
		return NULL;

	return crtc;
}

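/* On gen2-gen4, FBC can only be enabled when no more than one pipe is active. */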
static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;
	int n_pipes = 0;
	struct drm_crtc *crtc;

	if (INTEL_INFO(dev_priv)->gen > 4)
		return true;

	for_each_pipe(dev_priv, pipe) {
		crtc = dev_priv->pipe_to_crtc_mapping[pipe];

		if (intel_crtc_active(crtc) &&
		    to_intel_plane_state(crtc->primary->state)->visible)
			n_pipes++;
	}

	return (n_pipes < 2);
}

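/*
 * Reserve stolen space for the CFB, first trying to over-allocate and then
 * progressively shrinking the request while doubling the compression
 * threshold, up to the hardware's 1:4 limit. Returns the threshold used,
 * or 0 on failure.
 */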
static int find_compression_threshold(struct drm_i915_private *dev_priv,
				      struct drm_mm_node *node,
				      int size,
				      int fb_cpp)
{
	int compression_threshold = 1;
	int ret;
	u64 end;

	/* The FBC hardware for BDW/SKL doesn't have access to the stolen
	 * reserved range size, so it always assumes the maximum (8mb) is used.
	 * If we enable FBC using a CFB on that memory range we'll get FIFO
	 * underruns, even if that range is not reserved by the BIOS. */
	if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
		end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
	else
		end = dev_priv->gtt.stolen_usable_size;

	/* HACK: This code depends on what we will do in *_enable_fbc. If that
	 * code changes, this code needs to change as well.
	 *
	 * The enable_fbc code will attempt to use one of our 2 compression
	 * thresholds, therefore, in that case, we only have 1 resort.
	 */

	/* Try to over-allocate to reduce reallocations and fragmentation. */
	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
						   4096, 0, end);
	if (ret == 0)
		return compression_threshold;

again:
	/* HW's ability to limit the CFB is 1:4 */
	if (compression_threshold > 4 ||
	    (fb_cpp == 2 && compression_threshold == 2))
		return 0;

	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
						   4096, 0, end);
	if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
		return 0;
	} else if (ret) {
		compression_threshold <<= 1;
		goto again;
	} else {
		return compression_threshold;
	}
}

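/*
 * Allocate the compressed framebuffer (and, on the legacy pre-GM45 path, the
 * line length buffer) from stolen memory and point the hardware at it.
 */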
static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size,
			       int fb_cpp)
{
	struct drm_mm_node *uninitialized_var(compressed_llb);
	int ret;

	ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
					 size, fb_cpp);
	if (!ret)
		goto err_llb;
	else if (ret > 1) {
		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
	}

	dev_priv->fbc.threshold = ret;

	if (INTEL_INFO(dev_priv)->gen >= 5)
		I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
	else if (IS_GM45(dev_priv)) {
		I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
	} else {
		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
		if (!compressed_llb)
			goto err_fb;

		ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
						  4096, 4096);
		if (ret)
			goto err_fb;

		dev_priv->fbc.compressed_llb = compressed_llb;

		I915_WRITE(FBC_CFB_BASE,
			   dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
		I915_WRITE(FBC_LL_BASE,
			   dev_priv->mm.stolen_base + compressed_llb->start);
	}

	dev_priv->fbc.uncompressed_size = size;

	DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
		      dev_priv->fbc.compressed_fb.size,
		      dev_priv->fbc.threshold);

	return 0;

err_fb:
	kfree(compressed_llb);
	i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
err_llb:
	pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}

static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc.uncompressed_size == 0)
		return;

	i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);

	if (dev_priv->fbc.compressed_llb) {
		i915_gem_stolen_remove_node(dev_priv,
					    dev_priv->fbc.compressed_llb);
		kfree(dev_priv->fbc.compressed_llb);
	}

	dev_priv->fbc.uncompressed_size = 0;
}

void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->fbc.enable_fbc)
		return;

	mutex_lock(&dev_priv->fbc.lock);
	__intel_fbc_cleanup_cfb(dev_priv);
	mutex_unlock(&dev_priv->fbc.lock);
}

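/* Make sure the CFB is big enough for the current framebuffer, reallocating it if not. */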
static int intel_fbc_setup_cfb(struct drm_i915_private *dev_priv, int size,
			       int fb_cpp)
{
	if (size <= dev_priv->fbc.uncompressed_size)
		return 0;

	/* Release any current block */
	__intel_fbc_cleanup_cfb(dev_priv);

	return intel_fbc_alloc_cfb(dev_priv, size, fb_cpp);
}

static bool stride_is_valid(struct drm_i915_private *dev_priv,
			    unsigned int stride)
{
	/* These should have been caught earlier. */
	WARN_ON(stride < 512);
	WARN_ON((stride & (64 - 1)) != 0);

	/* Below are the additional FBC restrictions. */

	if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
		return stride == 4096 || stride == 8192;

	if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048)
		return false;

	if (stride > 16384)
		return false;

	return true;
}

static bool pixel_format_is_valid(struct drm_framebuffer *fb)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	switch (fb->pixel_format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
		return true;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_RGB565:
		/* 16bpp not supported on gen2 */
		if (IS_GEN2(dev))
			return false;
		/* WaFbcOnly1to1Ratio:ctg */
		if (IS_G4X(dev_priv))
			return false;
		return true;
	default:
		return false;
	}
}

/**
 * __intel_fbc_update - enable/disable FBC as needed, unlocked
 * @dev_priv: i915 device instance
 *
 * Set up the framebuffer compression hardware at mode set time.  We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= max_hdisplay in width, max_vdisplay in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one.  It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
static void __intel_fbc_update(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc = NULL;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	const struct drm_display_mode *adjusted_mode;
	unsigned int max_width, max_height;

	WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));

	/* disable framebuffer compression in vGPU */
	if (intel_vgpu_active(dev_priv->dev))
		i915.enable_fbc = 0;

	if (i915.enable_fbc < 0) {
		set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT);
		goto out_disable;
	}

	if (!i915.enable_fbc) {
		set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM);
		goto out_disable;
	}

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	crtc = intel_fbc_find_crtc(dev_priv);
	if (!crtc) {
		set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT);
		goto out_disable;
	}

	if (!multiple_pipes_ok(dev_priv)) {
		set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES);
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->primary->fb;
	obj = intel_fb_obj(fb);
	adjusted_mode = &intel_crtc->config->base.adjusted_mode;

	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE);
		goto out_disable;
	}

	if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
		max_width = 4096;
		max_height = 4096;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		max_width = 4096;
		max_height = 2048;
	} else {
		max_width = 2048;
		max_height = 1536;
	}
	if (intel_crtc->config->pipe_src_w > max_width ||
	    intel_crtc->config->pipe_src_h > max_height) {
		set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE);
		goto out_disable;
	}
	if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) &&
	    intel_crtc->plane != PLANE_A) {
		set_no_fbc_reason(dev_priv, FBC_BAD_PLANE);
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		set_no_fbc_reason(dev_priv, FBC_NOT_TILED);
		goto out_disable;
	}
	if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
	    crtc->primary->state->rotation != BIT(DRM_ROTATE_0)) {
		set_no_fbc_reason(dev_priv, FBC_ROTATION);
		goto out_disable;
	}

	if (!stride_is_valid(dev_priv, fb->pitches[0])) {
		set_no_fbc_reason(dev_priv, FBC_BAD_STRIDE);
		goto out_disable;
	}

	if (!pixel_format_is_valid(fb)) {
		set_no_fbc_reason(dev_priv, FBC_PIXEL_FORMAT);
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master()) {
		set_no_fbc_reason(dev_priv, FBC_IN_DBG_MASTER);
		goto out_disable;
	}

	/* WaFbcExceedCdClockThreshold:hsw,bdw */
	if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
	    ilk_pipe_pixel_rate(intel_crtc->config) >=
	    dev_priv->cdclk_freq * 95 / 100) {
		set_no_fbc_reason(dev_priv, FBC_PIXEL_RATE);
		goto out_disable;
	}

	if (intel_fbc_setup_cfb(dev_priv, obj->base.size,
				drm_format_plane_cpp(fb->pixel_format, 0))) {
		set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL);
		goto out_disable;
	}

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->fbc.crtc == intel_crtc &&
	    dev_priv->fbc.fb_id == fb->base.id &&
	    dev_priv->fbc.y == crtc->y)
		return;

	if (intel_fbc_enabled(dev_priv)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		__intel_fbc_disable(dev_priv);
	}

	intel_fbc_schedule_enable(intel_crtc);
	dev_priv->fbc.no_fbc_reason = FBC_OK;
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev_priv)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		__intel_fbc_disable(dev_priv);
	}
	__intel_fbc_cleanup_cfb(dev_priv);
}

/*
 * intel_fbc_update - enable/disable FBC as needed
 * @dev_priv: i915 device instance
 *
 * This function reevaluates the overall state and enables or disables FBC.
 */
void intel_fbc_update(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->fbc.enable_fbc)
		return;

	mutex_lock(&dev_priv->fbc.lock);
	__intel_fbc_update(dev_priv);
	mutex_unlock(&dev_priv->fbc.lock);
}

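/*
 * Frontbuffer invalidate hook: non-GTT writes to a frontbuffer tracked by FBC
 * mark it busy and disable compression until intel_fbc_flush() is called.
 */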
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits,
			  enum fb_op_origin origin)
{
	unsigned int fbc_bits;

	if (!dev_priv->fbc.enable_fbc)
		return;

	if (origin == ORIGIN_GTT)
		return;

	mutex_lock(&dev_priv->fbc.lock);

	if (dev_priv->fbc.enabled)
		fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
	else if (dev_priv->fbc.fbc_work)
		fbc_bits = INTEL_FRONTBUFFER_PRIMARY(
					dev_priv->fbc.fbc_work->crtc->pipe);
	else
		fbc_bits = dev_priv->fbc.possible_framebuffer_bits;

	dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);

	if (dev_priv->fbc.busy_bits)
		__intel_fbc_disable(dev_priv);

	mutex_unlock(&dev_priv->fbc.lock);
}

void intel_fbc_flush(struct drm_i915_private *dev_priv,
		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
	if (!dev_priv->fbc.enable_fbc)
		return;

	if (origin == ORIGIN_GTT)
		return;

	mutex_lock(&dev_priv->fbc.lock);

	dev_priv->fbc.busy_bits &= ~frontbuffer_bits;

	if (!dev_priv->fbc.busy_bits) {
		__intel_fbc_disable(dev_priv);
		__intel_fbc_update(dev_priv);
	}

	mutex_unlock(&dev_priv->fbc.lock);
}

/**
 * intel_fbc_init - Initialize FBC
 * @dev_priv: the i915 device
 *
 * This function might be called during PM init process.
 */
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	mutex_init(&dev_priv->fbc.lock);

	if (!HAS_FBC(dev_priv)) {
		dev_priv->fbc.enabled = false;
		dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED;
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		dev_priv->fbc.possible_framebuffer_bits |=
				INTEL_FRONTBUFFER_PRIMARY(pipe);

		if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
			break;
	}

	if (INTEL_INFO(dev_priv)->gen >= 7) {
		dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
		dev_priv->fbc.enable_fbc = gen7_fbc_enable;
		dev_priv->fbc.disable_fbc = ilk_fbc_disable;
	} else if (INTEL_INFO(dev_priv)->gen >= 5) {
		dev_priv->fbc.fbc_enabled = ilk_fbc_enabled;
		dev_priv->fbc.enable_fbc = ilk_fbc_enable;
		dev_priv->fbc.disable_fbc = ilk_fbc_disable;
	} else if (IS_GM45(dev_priv)) {
		dev_priv->fbc.fbc_enabled = g4x_fbc_enabled;
		dev_priv->fbc.enable_fbc = g4x_fbc_enable;
		dev_priv->fbc.disable_fbc = g4x_fbc_disable;
	} else {
		dev_priv->fbc.fbc_enabled = i8xx_fbc_enabled;
		dev_priv->fbc.enable_fbc = i8xx_fbc_enable;
		dev_priv->fbc.disable_fbc = i8xx_fbc_disable;

		/* This value was pulled out of someone's hat */
		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
	}

	dev_priv->fbc.enabled = dev_priv->fbc.fbc_enabled(dev_priv);
}