/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#ifndef _INTEL_GUC_H_
#define _INTEL_GUC_H_

#include <linux/xarray.h>
#include <linux/delay.h>

#include "intel_uncore.h"
#include "intel_guc_fw.h"
#include "intel_guc_fwif.h"
#include "intel_guc_ct.h"
#include "intel_guc_log.h"
#include "intel_guc_reg.h"
#include "intel_uc_fw.h"
#include "i915_utils.h"
#include "i915_vma.h"

struct __guc_ads_blob;

/*
 * Top level structure of GuC. It handles firmware loading and manages client
 * pool. intel_guc owns an intel_guc_client to replace the legacy ExecList
 * submission.
 */
struct intel_guc {
	struct intel_uc_fw fw;
	struct intel_guc_log log;
	struct intel_guc_ct ct;

	/* Global engine used to submit requests to GuC */
	struct i915_sched_engine *sched_engine;
	struct i915_request *stalled_request;

	/* intel_guc_recv interrupt related state */
	spinlock_t irq_lock;
	unsigned int msg_enabled_mask;
	struct {
		void (*reset)(struct intel_guc *guc);
		void (*enable)(struct intel_guc *guc);
		void (*disable)(struct intel_guc *guc);
	} interrupts;

	/*
	 * contexts_lock protects the pool of free guc ids and a linked list of
	 * guc ids available to be stolen (see the allocation sketch below this
	 * struct)
	 */
	spinlock_t contexts_lock;
	struct ida guc_ids;
	struct list_head guc_id_list;

	bool submission_selected;
	struct i915_vma *ads_vma;
	struct __guc_ads_blob *ads_blob;

	struct i915_vma *lrc_desc_pool;
	void *lrc_desc_pool_vaddr;
	/* guc_id to intel_context lookup */
	struct xarray context_lookup;

	/* Control params for fw initialization */
	u32 params[GUC_CTL_MAX_DWORDS];

	/* GuC's FW specific registers used in MMIO send */
	struct {
		u32 base;
		unsigned int count;
		enum forcewake_domains fw_domains;
	} send_regs;

	/* register used to send interrupts to the GuC FW */
	i915_reg_t notify_reg;

	/* Store msg (e.g. log flush) that we see while CTBs are disabled */
	u32 mmio_msg;

	/* To serialize the intel_guc_send actions */
	struct mutex send_mutex;
};
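
/*
 * Illustrative sketch of the guc_id allocation that contexts_lock above
 * protects; the real logic lives in intel_guc_submission.c, and the bound
 * and GFP flags below are assumptions:
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	spin_lock_irqsave(&guc->contexts_lock, flags);
 *	ret = ida_simple_get(&guc->guc_ids, 0, max_guc_ids, GFP_ATOMIC);
 *	if (ret < 0) {
 *		// out of free ids: steal one from an idle context on
 *		// guc->guc_id_list instead
 *	}
 *	spin_unlock_irqrestore(&guc->contexts_lock, flags);
 */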

static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
{
	return container_of(log, struct intel_guc, log);
}

static
inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
	return intel_guc_ct_send(&guc->ct, action, len, NULL, 0, 0);
}

static
inline int intel_guc_send_nb(struct intel_guc *guc, const u32 *action, u32 len)
{
	return intel_guc_ct_send(&guc->ct, action, len, NULL, 0,
				 INTEL_GUC_CT_SEND_NB);
}

static inline int
intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
			   u32 *response_buf, u32 response_buf_size)
{
	return intel_guc_ct_send(&guc->ct, action, len,
				 response_buf, response_buf_size, 0);
}
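
/*
 * Illustrative usage of the send helpers above: intel_guc_send() waits for
 * the request to be processed, while intel_guc_send_nb() is non-blocking
 * and may fail with -EBUSY; intel_guc_send_and_receive() also copies back
 * the reply. The action code and arguments below are hypothetical; real
 * H2G actions and their layouts are defined in intel_guc_fwif.h:
 *
 *	u32 action[] = { INTEL_GUC_ACTION_FOO, param0, param1 };
 *	u32 response[8];
 *	int err;
 *
 *	err = intel_guc_send_nb(guc, action, ARRAY_SIZE(action));
 *	err = intel_guc_send_and_receive(guc, action, ARRAY_SIZE(action),
 *					 response, ARRAY_SIZE(response));
 */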

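/**
 * intel_guc_send_busy_loop() - Send an action, optionally retrying on -EBUSY
 * @guc: the intel_guc structure
 * @action: the action payload, as defined in intel_guc_fwif.h
 * @len: length of @action in dwords
 * @loop: whether to keep retrying while the CT channel is busy
 *
 * Thin retry wrapper around intel_guc_send_nb(): with @loop set, -EBUSY is
 * retried with an exponentially growing sleep when sleeping is allowed, or
 * with a cpu_relax() busy loop when called in atomic context.
 */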
static inline int intel_guc_send_busy_loop(struct intel_guc *guc,
					   const u32 *action,
					   u32 len,
					   bool loop)
{
	int err;
	unsigned int sleep_period_ms = 1;
	bool not_atomic = !in_atomic() && !irqs_disabled();

	/*
	 * FIXME: Have the caller pass in whether we are in an atomic context,
	 * to avoid using in_atomic(). It is likely safe here, as we also check
	 * that irqs are disabled, which essentially all the spin locks in i915
	 * do, but regardless this should be cleaned up.
	 */

	/* No sleeping with spin locks, just busy loop */
	might_sleep_if(loop && not_atomic);

retry:
	err = intel_guc_send_nb(guc, action, len);
	if (unlikely(err == -EBUSY && loop)) {
		if (likely(not_atomic)) {
			if (msleep_interruptible(sleep_period_ms))
				return -EINTR;
			sleep_period_ms = sleep_period_ms << 1;
		} else {
			cpu_relax();
		}
		goto retry;
	}

	return err;
}

static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
{
	intel_guc_ct_event_handler(&guc->ct);
}

/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
#define GUC_GGTT_TOP	0xFEE00000

/**
 * intel_guc_ggtt_offset() - Get and validate the GGTT offset of @vma
 * @guc: intel_guc structure.
 * @vma: i915 graphics virtual memory area.
 *
 * GuC does not allow any gfx GGTT address that falls into the range
 * [0, ggtt.pin_bias), which is reserved for Boot ROM, SRAM and WOPCM.
 * Currently, in order to exclude the [0, ggtt.pin_bias) address space from
 * the GGTT, all gfx objects used by GuC are allocated with
 * intel_guc_allocate_vma() and pinned with PIN_OFFSET_BIAS using the value
 * of ggtt.pin_bias.
 *
 * Return: GGTT offset of the @vma.
 */
static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
					struct i915_vma *vma)
{
	u32 offset = i915_ggtt_offset(vma);

	GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma));
	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));

	return offset;
}
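
/*
 * Illustrative example: buffers handed to the GuC are expected to come from
 * intel_guc_allocate_vma() (declared below), so the offset returned here
 * always lands inside [ggtt.pin_bias, GUC_GGTT_TOP):
 *
 *	struct i915_vma *vma = intel_guc_allocate_vma(guc, SZ_4K);
 *	u32 offset;
 *
 *	if (!IS_ERR(vma))
 *		offset = intel_guc_ggtt_offset(guc, vma);
 */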

void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_write_params(struct intel_guc *guc);
int intel_guc_init(struct intel_guc *guc);
void intel_guc_fini(struct intel_guc *guc);
void intel_guc_notify(struct intel_guc *guc);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size);
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
				   struct i915_vma **out_vma, void **out_vaddr);

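/*
 * The predicates below form a narrowing ladder of GuC states: supported
 * (the platform has a GuC), wanted (selected by the user or by platform
 * defaults), used (firmware available for loading), fw running, and
 * finally ready (running with CT communication enabled).
 */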
static inline bool intel_guc_is_supported(struct intel_guc *guc)
{
	return intel_uc_fw_is_supported(&guc->fw);
}

static inline bool intel_guc_is_wanted(struct intel_guc *guc)
{
	return intel_uc_fw_is_enabled(&guc->fw);
}

static inline bool intel_guc_is_used(struct intel_guc *guc)
{
	GEM_BUG_ON(__intel_uc_fw_status(&guc->fw) == INTEL_UC_FIRMWARE_SELECTED);
	return intel_uc_fw_is_available(&guc->fw);
}

static inline bool intel_guc_is_fw_running(struct intel_guc *guc)
{
	return intel_uc_fw_is_running(&guc->fw);
}

static inline bool intel_guc_is_ready(struct intel_guc *guc)
{
	return intel_guc_is_fw_running(guc) && intel_guc_ct_enabled(&guc->ct);
}

static inline int intel_guc_sanitize(struct intel_guc *guc)
{
	intel_uc_fw_sanitize(&guc->fw);
	intel_guc_ct_sanitize(&guc->ct);
	guc->mmio_msg = 0;

	return 0;
}

static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask |= mask;
	spin_unlock_irq(&guc->irq_lock);
}

static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask &= ~mask;
	spin_unlock_irq(&guc->irq_lock);
}
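
/*
 * Hypothetical example, assuming the receive-message bits defined in
 * intel_guc_fwif.h (exact names vary between versions): the log relay
 * could unmask log-flush notifications like so, and mask them again via
 * intel_guc_disable_msg() on teardown:
 *
 *	intel_guc_enable_msg(guc, INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
 *			     INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
 */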

int intel_guc_reset_engine(struct intel_guc *guc,
			   struct intel_engine_cs *engine);

int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
					  const u32 *msg, u32 len);

void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);

#endif