/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_params.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_pm.h"
#include "intel_rc6.h"
#include "intel_wakeref.h"

static void pm_notify(struct intel_gt *gt, int state)
18
{
19
	blocking_notifier_call_chain(&gt->pm_notifications, state, gt->i915);
20 21
}

22
static int __gt_unpark(struct intel_wakeref *wf)
23
{
24 25
	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
	struct drm_i915_private *i915 = gt->i915;
26 27 28 29 30 31 32 33 34 35 36 37 38 39

	GEM_TRACE("\n");

	/*
	 * It seems that the DMC likes to transition between the DC states a lot
	 * when there are no connected displays (no active power domains) during
	 * command submission.
	 *
	 * This activity has negative impact on the performance of the chip with
	 * huge latencies observed in the interrupt handler and elsewhere.
	 *
	 * Work around it by grabbing a GT IRQ power domain whilst there is any
	 * GT activity, preventing any DC state transitions.
	 */
40 41
	gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
	GEM_BUG_ON(!gt->awake);
42 43 44 45 46 47 48 49 50

	intel_enable_gt_powersave(i915);

	i915_update_gfx_val(i915);
	if (INTEL_GEN(i915) >= 6)
		gen6_rps_busy(i915);

	i915_pmu_gt_unparked(i915);

51
	intel_gt_queue_hangcheck(gt);
52

53
	pm_notify(gt, INTEL_GT_UNPARK);
54 55 56 57

	return 0;
}

58
static int __gt_park(struct intel_wakeref *wf)
59
{
60 61 62
	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
	intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
	struct drm_i915_private *i915 = gt->i915;
63 64 65

	GEM_TRACE("\n");

66
	pm_notify(gt, INTEL_GT_PARK);
67 68 69 70 71

	i915_pmu_gt_parked(i915);
	if (INTEL_GEN(i915) >= 6)
		gen6_rps_idle(i915);

72 73 74
	/* Everything switched off, flush any residual interrupt just in case */
	intel_synchronize_irq(i915);

75 76 77 78 79 80
	GEM_BUG_ON(!wakeref);
	intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);

	return 0;
}

81 82 83 84 85
static const struct intel_wakeref_ops wf_ops = {
	.get = __gt_unpark,
	.put = __gt_park,
	.flags = INTEL_WAKEREF_PUT_ASYNC,
};
86

87
void intel_gt_pm_init_early(struct intel_gt *gt)
88
{
89 90
	intel_wakeref_init(&gt->wakeref, &gt->i915->runtime_pm, &wf_ops);

91
	BLOCKING_INIT_NOTIFIER_HEAD(&gt->pm_notifications);
92 93
}

94 95 96 97 98 99 100 101 102 103
void intel_gt_pm_init(struct intel_gt *gt)
{
	/*
	 * Enabling power-management should be "self-healing". If we cannot
	 * enable a feature, simply leave it disabled with a notice to the
	 * user.
	 */
	intel_rc6_init(&gt->rc6);
}

104
static bool reset_engines(struct intel_gt *gt)
105
{
106
	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
107 108
		return false;

109
	return __intel_gt_reset(gt, ALL_ENGINES) == 0;
110 111 112 113
}

/**
 * intel_gt_sanitize: called after the GPU has lost power
114
 * @gt: the i915 GT container
115 116 117 118 119 120 121
 * @force: ignore a failed reset and sanitize engine state anyway
 *
 * Anytime we reset the GPU, either with an explicit GPU reset or through a
 * PCI power cycle, the GPU loses state and we must reset our state tracking
 * to match. Note that calling intel_gt_sanitize() if the GPU has not
 * been reset results in much confusion!
 */
122
void intel_gt_sanitize(struct intel_gt *gt, bool force)
123 124 125 126 127 128
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	GEM_TRACE("\n");

129 130
	intel_uc_sanitize(&gt->uc);

131
	if (!reset_engines(gt) && !force)
132 133
		return;

134
	for_each_engine(engine, gt->i915, id)
135
		__intel_engine_reset(engine, false);
136 137
}

138
void intel_gt_pm_disable(struct intel_gt *gt)
139
{
140 141
	if (!is_mock_gt(gt))
		intel_sanitize_gt_powersave(gt->i915);
142 143
}

144
void intel_gt_pm_fini(struct intel_gt *gt)
145
{
146
	intel_rc6_fini(&gt->rc6);
147 148
}

149
int intel_gt_resume(struct intel_gt *gt)
150 151 152
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
153
	int err = 0;
154 155 156 157 158 159 160

	/*
	 * After resume, we may need to poke into the pinned kernel
	 * contexts to paper over any damage caused by the sudden suspend.
	 * Only the kernel contexts should remain pinned over suspend,
	 * allowing us to fixup the user contexts on their first pin.
	 */
161
	intel_gt_pm_get(gt);
162 163 164
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	intel_rc6_sanitize(&gt->rc6);

165
	for_each_engine(engine, gt->i915, id) {
166 167
		struct intel_context *ce;

168 169
		intel_engine_pm_get(engine);

170
		ce = engine->kernel_context;
171 172 173
		if (ce) {
			GEM_BUG_ON(!intel_context_is_pinned(ce));
			mutex_acquire(&ce->pin_mutex.dep_map, 0, 0, _THIS_IP_);
174
			ce->ops->reset(ce);
175 176
			mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_);
		}
177

178 179 180 181 182 183 184 185 186 187
		engine->serial++; /* kernel context lost */
		err = engine->resume(engine);

		intel_engine_pm_put(engine);
		if (err) {
			dev_err(gt->i915->drm.dev,
				"Failed to restart %s (%d)\n",
				engine->name, err);
			break;
		}
188
	}
189 190 191

	intel_rc6_enable(&gt->rc6);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
192 193 194
	intel_gt_pm_put(gt);

	return err;
195
}
196

197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233
/*
 * Flush outstanding requests and wait for the GT to become idle,
 * forcibly wedging the GPU if the engines refuse to quiesce in time.
 */
static void wait_for_idle(struct intel_gt *gt)
{
	mutex_lock(&gt->i915->drm.struct_mutex); /* XXX */
	do {
		if (i915_gem_wait_for_idle(gt->i915,
					   I915_WAIT_LOCKED,
					   I915_GEM_IDLE_TIMEOUT) == -ETIME) {
			/* XXX hide warning from gem_eio */
			if (i915_modparams.reset) {
				dev_err(gt->i915->drm.dev,
					"Failed to idle engines, declaring wedged!\n");
				GEM_TRACE_DUMP();
			}

			/*
			 * Forcibly cancel outstanding work and leave
			 * the gpu quiet.
			 */
			intel_gt_set_wedged(gt);
		}
		/* Loop until retiring makes no further progress. */
	} while (i915_retire_requests(gt->i915));
	mutex_unlock(&gt->i915->drm.struct_mutex);

	/* Wait for the final GT wakeref to be dropped (parking complete). */
	intel_gt_pm_wait_for_idle(gt);
}

/* Quiesce the GT and turn off RC6 in preparation for suspend. */
void intel_gt_suspend(struct intel_gt *gt)
{
	intel_wakeref_t wf;

	/*
	 * The GT is expected to already be idle here, but flush any
	 * residual activity so we do not depend on our callers.
	 */
	wait_for_idle(gt);

	/* Hold a runtime-pm wakeref while touching the RC6 registers. */
	with_intel_runtime_pm(&gt->i915->runtime_pm, wf)
		intel_rc6_disable(&gt->rc6);
}

234 235 236 237 238 239 240 241 242 243 244
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
	intel_uc_runtime_suspend(&gt->uc);
}

/*
 * Runtime-resume hook: restore swizzling (lost over suspend) and then
 * bring the microcontrollers back up. Returns the uc resume status.
 */
int intel_gt_runtime_resume(struct intel_gt *gt)
{
	intel_gt_init_swizzling(gt);

	return intel_uc_runtime_resume(&gt->uc);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_gt_pm.c"
#endif