/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_params.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_pm.h"
#include "intel_wakeref.h"

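/*
 * Broadcast GT park/unpark transitions to any listeners registered on
 * gt.pm_notifications, so they can react to GT power state changes.
 */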
static void pm_notify(struct drm_i915_private *i915, int state)
{
	blocking_notifier_call_chain(&i915->gt.pm_notifications, state, i915);
}

static int __gt_unpark(struct intel_wakeref *wf)
{
	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
	struct drm_i915_private *i915 = gt->i915;

	GEM_TRACE("\n");

	/*
	 * It seems that the DMC likes to transition between the DC states a lot
	 * when there are no connected displays (no active power domains) during
	 * command submission.
	 *
	 * This activity has negative impact on the performance of the chip with
	 * huge latencies observed in the interrupt handler and elsewhere.
	 *
	 * Work around it by grabbing a GT IRQ power domain whilst there is any
	 * GT activity, preventing any DC state transitions.
	 */
	gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
	GEM_BUG_ON(!gt->awake);

	intel_enable_gt_powersave(i915);

	i915_update_gfx_val(i915);
	if (INTEL_GEN(i915) >= 6)
		gen6_rps_busy(i915);

	i915_pmu_gt_unparked(i915);

	intel_gt_queue_hangcheck(gt);

	pm_notify(i915, INTEL_GT_UNPARK);

	return 0;
}

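/*
 * Final wakeref put: notify listeners, stop PMU sampling, drop RPS to
 * idle and release the GT IRQ power domain taken in __gt_unpark().
 */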
static int __gt_park(struct intel_wakeref *wf)
{
	struct drm_i915_private *i915 =
		container_of(wf, typeof(*i915), gt.wakeref);
	intel_wakeref_t wakeref = fetch_and_zero(&i915->gt.awake);

	GEM_TRACE("\n");

	pm_notify(i915, INTEL_GT_PARK);

	i915_pmu_gt_parked(i915);
	if (INTEL_GEN(i915) >= 6)
		gen6_rps_idle(i915);

	/* Everything switched off, flush any residual interrupt just in case */
	intel_synchronize_irq(i915);

	GEM_BUG_ON(!wakeref);
	intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);

	return 0;
}

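/*
 * The GT wakeref: the first get unparks the GT, the last put parks it
 * again. INTEL_WAKEREF_PUT_ASYNC defers __gt_park() to a worker, so
 * the final put may be called from atomic context.
 */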
static const struct intel_wakeref_ops wf_ops = {
	.get = __gt_unpark,
	.put = __gt_park,
	.flags = INTEL_WAKEREF_PUT_ASYNC,
};

void intel_gt_pm_init_early(struct intel_gt *gt)
{
	intel_wakeref_init(&gt->wakeref, &gt->i915->runtime_pm, &wf_ops);

	BLOCKING_INIT_NOTIFIER_HEAD(&gt->pm_notifications);
}

static bool reset_engines(struct intel_gt *gt)
{
	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		return false;

	return __intel_gt_reset(gt, ALL_ENGINES) == 0;
}

/**
 * intel_gt_sanitize - called after the GPU has lost power
 * @gt: the i915 GT container
 * @force: ignore a failed reset and sanitize engine state anyway
 *
 * Anytime we reset the GPU, either with an explicit GPU reset or through a
 * PCI power cycle, the GPU loses state and we must reset our state tracking
 * to match. Note that calling intel_gt_sanitize() if the GPU has not
 * been reset results in much confusion!
 */
void intel_gt_sanitize(struct intel_gt *gt, bool force)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	GEM_TRACE("\n");

	intel_uc_sanitize(&gt->uc);

	if (!reset_engines(gt) && !force)
		return;

	for_each_engine(engine, gt->i915, id)
		__intel_engine_reset(engine, false);
}
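
/*
 * A minimal, hypothetical caller sketch for illustration only: a
 * resume path sanitizes the stale GPU state before restarting the
 * engines, e.g.
 *
 *	intel_gt_sanitize(gt, false);
 *	err = intel_gt_resume(gt);
 */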

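/*
 * The mock selftest device has no hardware to power up; it identifies
 * itself with the gt->awake == -1 sentinel, which is only checked in
 * selftest builds (I915_SELFTEST_ONLY evaluates to 0 otherwise).
 */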
static bool is_mock_device(const struct intel_gt *gt)
{
	return I915_SELFTEST_ONLY(gt->awake == -1);
}

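/*
 * Wake the GT and cycle each engine's pm once; bumping engine->serial
 * forces a fresh kernel context reload when the engine next parks.
 */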
void intel_gt_pm_enable(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Powersaving is controlled by the host when inside a VM */
	if (intel_vgpu_active(gt->i915))
		return;

	if (is_mock_device(gt))
		return;

	intel_gt_pm_get(gt);

	for_each_engine(engine, gt->i915, id) {
		intel_engine_pm_get(engine);
		engine->serial++; /* force kernel context reload */
		intel_engine_pm_put(engine);
	}

	intel_gt_pm_put(gt);
}

void intel_gt_pm_disable(struct intel_gt *gt)
{
	if (is_mock_device(gt))
		return;

	intel_sanitize_gt_powersave(gt->i915);
}

int intel_gt_resume(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * After resume, we may need to poke into the pinned kernel
	 * contexts to paper over any damage caused by the sudden suspend.
	 * Only the kernel contexts should remain pinned over suspend,
	 * allowing us to fixup the user contexts on their first pin.
	 */
	intel_gt_pm_get(gt);
	for_each_engine(engine, gt->i915, id) {
		struct intel_context *ce;

		intel_engine_pm_get(engine);

		ce = engine->kernel_context;
		if (ce) {
			GEM_BUG_ON(!intel_context_is_pinned(ce));
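			/*
			 * Tell lockdep we scrub the context state as if
			 * holding ce->pin_mutex; nothing else can pin the
			 * kernel context while we resume.
			 */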
			mutex_acquire(&ce->pin_mutex.dep_map, 0, 0, _THIS_IP_);
			ce->ops->reset(ce);
			mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_);
		}

		engine->serial++; /* kernel context lost */
		err = engine->resume(engine);

		intel_engine_pm_put(engine);
		if (err) {
			dev_err(gt->i915->drm.dev,
				"Failed to restart %s (%d)\n",
				engine->name, err);
			break;
		}
	}
	intel_gt_pm_put(gt);

	return err;
}

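/* Forward runtime-pm transitions to the microcontrollers (GuC/HuC) */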
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
	intel_uc_runtime_suspend(&gt->uc);
}

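/*
 * Swizzling state does not survive suspend, so restore it before
 * waking the microcontrollers.
 */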
int intel_gt_runtime_resume(struct intel_gt *gt)
{
	intel_gt_init_swizzling(gt);

	return intel_uc_runtime_resume(&gt->uc);
}