/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Daniel Vetter <daniel.vetter@ffwll.ch>
 */

/**
 * DOC: frontbuffer tracking
 *
 * Many features require us to track changes to the currently active
 * frontbuffer, especially rendering targeted at the frontbuffer.
 *
 * To be able to do so GEM tracks frontbuffers using a bitmask for all possible
 * frontbuffer slots through i915_gem_track_fb(). The functions in this file are
 * then called when the contents of the frontbuffer are invalidated, when
 * frontbuffer rendering has stopped again to flush out all the changes and when
 * the frontbuffer is exchanged with a flip. Subsystems interested in
 * frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their callbacks
 * into the relevant places and filter for the frontbuffer slots that they are
 * interested in.
 *
 * On a high level there are two types of powersaving features. The first ones
 * work like a special cache (FBC and PSR) and are interested when they should
 * stop caching and when to restart caching. This is done by placing callbacks
 * into the invalidate and the flush functions: At invalidate the caching must
 * be stopped and at flush time it can be restarted. And maybe they need to know
 * when the frontbuffer changes (e.g. when the hw doesn't initiate an invalidate
 * and flush on its own) which can be achieved with placing callbacks into the
 * flip functions.
 *
 * The other type of display power saving feature only cares about busyness
 * (e.g. DRRS). In that case all three (invalidate, flush and flip) indicate
 * busyness. There is no direct way to detect idleness. Instead an idle timer
 * delayed work should be started from the flush and flip functions and
 * cancelled as soon as busyness is detected.
 *
 * Note that there's also an older frontbuffer activity tracking scheme which
 * just tracks general activity. This is done by the various mark_busy and
 * mark_idle functions. For display power management features using these
 * functions is deprecated and should be avoided.
 */

#include <drm/drmP.h>

#include "intel_drv.h"
#include "i915_drv.h"

/**
 * intel_fb_obj_invalidate - invalidate frontbuffer object
 * @obj: GEM object to invalidate
71
 * @origin: which operation caused the invalidation
72 73 74
 *
 * This function gets called every time rendering on the given object starts and
 * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
75
 * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
76 77 78 79
 * until the rendering completes or a flip on this frontbuffer plane is
 * scheduled.
 */
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
80
			     enum fb_op_origin origin)
81 82
{
	struct drm_device *dev = obj->base.dev;
83
	struct drm_i915_private *dev_priv = to_i915(dev);
84 85 86 87 88 89

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (!obj->frontbuffer_bits)
		return;

90
	if (origin == ORIGIN_CS) {
91 92 93 94 95 96 97 98
		mutex_lock(&dev_priv->fb_tracking.lock);
		dev_priv->fb_tracking.busy_bits
			|= obj->frontbuffer_bits;
		dev_priv->fb_tracking.flip_bits
			&= ~obj->frontbuffer_bits;
		mutex_unlock(&dev_priv->fb_tracking.lock);
	}

R
Rodrigo Vivi 已提交
99
	intel_psr_invalidate(dev, obj->frontbuffer_bits);
100
	intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
101
	intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin);
102 103 104 105 106 107
}

/**
 * intel_frontbuffer_flush - flush frontbuffer
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
108
 * @origin: which operation caused the flush
109 110 111
 *
 * This function gets called every time rendering on the given planes has
 * completed and frontbuffer caching can be started again. Flushes will get
112
 * delayed if they're blocked by some outstanding asynchronous rendering.
113 114 115
 *
 * Can be called without any locks held.
 */
116 117 118
static void intel_frontbuffer_flush(struct drm_device *dev,
				    unsigned frontbuffer_bits,
				    enum fb_op_origin origin)
119
{
120
	struct drm_i915_private *dev_priv = to_i915(dev);
121 122 123 124 125 126

	/* Delay flushing when rings are still busy.*/
	mutex_lock(&dev_priv->fb_tracking.lock);
	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
	mutex_unlock(&dev_priv->fb_tracking.lock);

127 128 129
	if (!frontbuffer_bits)
		return;

130
	intel_edp_drrs_flush(dev, frontbuffer_bits);
131
	intel_psr_flush(dev, frontbuffer_bits, origin);
132
	intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
133 134 135 136 137 138
}

/**
 * intel_fb_obj_flush - flush frontbuffer object
 * @obj: GEM object to flush
 * @retire: set when retiring asynchronous rendering
139
 * @origin: which operation caused the flush
140 141 142 143 144 145
 *
 * This function gets called every time rendering on the given object has
 * completed and frontbuffer caching can be started again. If @retire is true
 * then any delayed flushes will be unblocked.
 */
void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
146
			bool retire, enum fb_op_origin origin)
147 148
{
	struct drm_device *dev = obj->base.dev;
149
	struct drm_i915_private *dev_priv = to_i915(dev);
150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167
	unsigned frontbuffer_bits;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (!obj->frontbuffer_bits)
		return;

	frontbuffer_bits = obj->frontbuffer_bits;

	if (retire) {
		mutex_lock(&dev_priv->fb_tracking.lock);
		/* Filter out new bits since rendering started. */
		frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;

		dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
		mutex_unlock(&dev_priv->fb_tracking.lock);
	}

168
	intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
169 170 171
}

/**
172
 * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
173 174 175 176 177 178 179 180 181 182 183 184 185
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called after scheduling a flip on @obj. The actual
 * frontbuffer flushing will be delayed until completion is signalled with
 * intel_frontbuffer_flip_complete. If an invalidate happens in between this
 * flush will be cancelled.
 *
 * Can be called without any locks held.
 */
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
				    unsigned frontbuffer_bits)
{
186
	struct drm_i915_private *dev_priv = to_i915(dev);
187 188

	mutex_lock(&dev_priv->fb_tracking.lock);
189 190 191
	dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
	/* Remove stale busy bits due to the old buffer. */
	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
192
	mutex_unlock(&dev_priv->fb_tracking.lock);
193

194
	intel_psr_single_frame_update(dev, frontbuffer_bits);
195 196 197
}

/**
198
 * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
199 200 201 202
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called after the flip has been latched and will complete
203
 * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
204 205 206 207 208 209
 *
 * Can be called without any locks held.
 */
void intel_frontbuffer_flip_complete(struct drm_device *dev,
				     unsigned frontbuffer_bits)
{
210
	struct drm_i915_private *dev_priv = to_i915(dev);
211 212 213 214 215 216 217

	mutex_lock(&dev_priv->fb_tracking.lock);
	/* Mask any cancelled flips. */
	frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
	dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
	mutex_unlock(&dev_priv->fb_tracking.lock);

218
	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
219
}
220 221 222 223 224 225 226 227 228 229 230 231 232 233 234

/**
 * intel_frontbuffer_flip - synchronous frontbuffer flip
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called after scheduling a flip on @obj. This is for
 * synchronous plane updates which will happen on the next vblank and which will
 * not get delayed by pending gpu rendering.
 *
 * Can be called without any locks held.
 */
void intel_frontbuffer_flip(struct drm_device *dev,
			    unsigned frontbuffer_bits)
{
235
	struct drm_i915_private *dev_priv = to_i915(dev);
236 237 238 239 240 241

	mutex_lock(&dev_priv->fb_tracking.lock);
	/* Remove stale busy bits due to the old buffer. */
	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
	mutex_unlock(&dev_priv->fb_tracking.lock);

242
	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
243
}