/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#ifndef _INTEL_UC_H_
#define _INTEL_UC_H_

#include "intel_guc_fwif.h"
#include "i915_guc_reg.h"
#include "intel_ringbuffer.h"

struct drm_i915_gem_request;

/*
 * This structure primarily describes the GEM object shared with the GuC.
 * The GEM object is held for the entire lifetime of our interaction with
 * the GuC, being allocated before the GuC is loaded with its firmware.
 * Because there's no way to update the address used by the GuC after
 * initialisation, the shared object must stay pinned into the GGTT as
 * long as the GuC is in use. We also keep the first page (only) mapped
 * into kernel address space, as it includes shared data that must be
 * updated on every request submission.
 *
 * The single GEM object described here is actually made up of several
 * separate areas, as far as the GuC is concerned. The first page (kept
 * kmap'd) includes the "process descriptor" which holds sequence data for
 * the doorbell, and one cacheline which actually *is* the doorbell; a
 * write to this will "ring the doorbell" (i.e. send an interrupt to the
 * GuC). The subsequent pages of the client object constitute the work
 * queue (a circular array of work items), again described in the process
 * descriptor. Work queue pages are mapped momentarily as required (a rough
 * sketch of this flow follows the structure definition below).
 *
 * We also keep a few statistics on failures. Ideally, these should all
 * be zero!
 *   no_wq_space: times that the submission pre-check found no space was
 *                available in the work queue (note, the queue is shared,
 *                not per-engine). It is OK for this to be nonzero, but
 *                it should not be huge!
 *   q_fail: failed to enqueue a work item. This should never happen,
 *           because we check for space beforehand.
 *   b_fail: failed to ring the doorbell. This should never happen, unless
 *           somehow the hardware misbehaves, or maybe if the GuC firmware
 *           crashes? We probably need to reset the GPU to recover.
 *   retcode: errno from last guc_submit()
 */
struct i915_guc_client {
	struct i915_vma *vma;
	void *vaddr;
	struct i915_gem_context *owner;
	struct intel_guc *guc;

	uint32_t engines;		/* bitmap of (host) engine ids	*/
	uint32_t priority;
	uint32_t ctx_index;
	uint32_t proc_desc_offset;

	uint32_t doorbell_offset;
	uint32_t cookie;
	uint16_t doorbell_id;
	uint16_t padding[3];		/* Maintain alignment		*/

	spinlock_t wq_lock;
	uint32_t wq_offset;
	uint32_t wq_size;
	uint32_t wq_tail;
	uint32_t wq_rsvd;
	uint32_t no_wq_space;
	uint32_t b_fail;
	int retcode;

	/* Per-engine counts of GuC submissions */
	uint64_t submissions[I915_NUM_ENGINES];
};
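
/*
 * Illustrative sketch only, not part of the driver: roughly how one
 * submission might use the layout described above, assuming the first
 * page of client->vaddr holds the process descriptor and doorbell and
 * that work items are written at wq_tail within the work queue pages.
 * The helper names (wqi_size, db) are hypothetical; the real code is
 * in i915_guc_submission.c.
 *
 *	struct guc_process_desc *desc = client->vaddr +
 *					client->proc_desc_offset;
 *	u32 *wqi = client->vaddr + client->wq_offset + client->wq_tail;
 *
 *	... fill in one work item at wqi ...
 *
 *	client->wq_tail += wqi_size;
 *	client->wq_tail &= client->wq_size - 1;	// circular work queue
 *	desc->tail = client->wq_tail;		// tell the GuC the new tail
 *
 *	// "ring the doorbell": a write to the doorbell cacheline in the
 *	// first (kmap'd) page sends an interrupt to the GuC
 *	db = client->vaddr + client->doorbell_offset;
 *	db->cookie = ++client->cookie;
 */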

enum intel_guc_fw_status {
	GUC_FIRMWARE_FAIL = -1,
	GUC_FIRMWARE_NONE = 0,
	GUC_FIRMWARE_PENDING,
	GUC_FIRMWARE_SUCCESS
};

/*
 * This structure encapsulates all the data needed during the process
 * of fetching, caching, and loading the firmware image into the GuC.
 */
struct intel_guc_fw {
	const char *			guc_fw_path;
	size_t				guc_fw_size;
	struct drm_i915_gem_object *	guc_fw_obj;
	enum intel_guc_fw_status	guc_fw_fetch_status;
	enum intel_guc_fw_status	guc_fw_load_status;

	uint16_t			guc_fw_major_wanted;
	uint16_t			guc_fw_minor_wanted;
	uint16_t			guc_fw_major_found;
	uint16_t			guc_fw_minor_found;

	uint32_t header_size;
	uint32_t header_offset;
	uint32_t rsa_size;
	uint32_t rsa_offset;
	uint32_t ucode_size;
	uint32_t ucode_offset;
};
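
/*
 * Illustrative sketch, not part of the driver: the two status fields above
 * track the fetch and load halves of the firmware state machine. A caller
 * would typically check them along these lines (the exact policy on failure
 * is decided elsewhere, e.g. in intel_guc_loader.c):
 *
 *	if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_SUCCESS)
 *		... the GuC is running its firmware, submission may be enabled ...
 *	else if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_FAIL)
 *		... the blob was missing or rejected, handle the fallback ...
 *
 * intel_guc_fw_status_repr(), declared below, returns a printable name for
 * either status value.
 */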

struct intel_guc_log {
	uint32_t flags;
	struct i915_vma *vma;
	void *buf_addr;
	struct workqueue_struct *flush_wq;
	struct work_struct flush_work;
	struct rchan *relay_chan;

	/* logging related stats */
	u32 capture_miss_count;
	u32 flush_interrupt_count;
	u32 prev_overflow_count[GUC_MAX_LOG_BUFFER];
	u32 total_overflow_count[GUC_MAX_LOG_BUFFER];
	u32 flush_count[GUC_MAX_LOG_BUFFER];
};

struct intel_guc {
	struct intel_guc_fw guc_fw;
	struct intel_guc_log log;

	/* intel_guc_recv interrupt related state */
	bool interrupts_enabled;

	struct i915_vma *ads_vma;
	struct i915_vma *ctx_pool_vma;
	struct ida ctx_ids;

	struct i915_guc_client *execbuf_client;

	DECLARE_BITMAP(doorbell_bitmap, GUC_MAX_DOORBELLS);
	uint32_t db_cacheline;		/* Cyclic counter mod pagesize	*/

	/* Action status & statistics */
	uint64_t action_count;		/* Total commands issued	*/
	uint32_t action_cmd;		/* Last command word		*/
	uint32_t action_status;		/* Last return status		*/
	uint32_t action_fail;		/* Total number of failures	*/
	int32_t action_err;		/* Last error code		*/

	uint64_t submissions[I915_NUM_ENGINES];
	uint32_t last_seqno[I915_NUM_ENGINES];

	/* To serialize the intel_guc_send actions */
	struct mutex send_mutex;
};
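
/*
 * Illustrative sketch, not part of the driver: doorbell_bitmap tracks which
 * of the GUC_MAX_DOORBELLS hardware doorbells are in use, while db_cacheline
 * cycles through a page so that successive clients land in different
 * cachelines. A hypothetical allocator might look like:
 *
 *	db_id = find_first_zero_bit(guc->doorbell_bitmap, GUC_MAX_DOORBELLS);
 *	if (db_id == GUC_MAX_DOORBELLS)
 *		return -ENOSPC;		// no free doorbell
 *	__set_bit(db_id, guc->doorbell_bitmap);
 *
 *	offset = guc->db_cacheline;
 *	guc->db_cacheline = (offset + cache_line_size()) % PAGE_SIZE;
 */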

/* intel_uc.c */
bool intel_guc_recv(struct drm_i915_private *dev_priv, u32 *status);
int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len);
int intel_guc_sample_forcewake(struct intel_guc *guc);
int intel_guc_log_flush_complete(struct intel_guc *guc);
int intel_guc_log_flush(struct intel_guc *guc);
int intel_guc_log_control(struct intel_guc *guc, u32 control_val);
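
/*
 * Illustrative sketch, not part of the driver: intel_guc_send() takes an
 * array of u32 action words (an opcode followed by its parameters, as laid
 * out in intel_guc_fwif.h) and serializes callers on guc->send_mutex, so a
 * caller only needs to build the message. SOME_HOST2GUC_ACTION below is a
 * hypothetical placeholder for a real opcode:
 *
 *	u32 action[2];
 *
 *	action[0] = SOME_HOST2GUC_ACTION;
 *	action[1] = param;
 *	err = intel_guc_send(guc, action, ARRAY_SIZE(action));
 */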

/* intel_guc_loader.c */
extern void intel_guc_init(struct drm_device *dev);
extern int intel_guc_setup(struct drm_device *dev);
extern void intel_guc_fini(struct drm_device *dev);
extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status);
extern int intel_guc_suspend(struct drm_device *dev);
extern int intel_guc_resume(struct drm_device *dev);

/* i915_guc_submission.c */
int i915_guc_submission_init(struct drm_i915_private *dev_priv);
int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
void i915_guc_wq_unreserve(struct drm_i915_gem_request *request);
void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
void i915_guc_capture_logs(struct drm_i915_private *dev_priv);
void i915_guc_flush_logs(struct drm_i915_private *dev_priv);
void i915_guc_register(struct drm_i915_private *dev_priv);
void i915_guc_unregister(struct drm_i915_private *dev_priv);
int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
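
/*
 * Illustrative sketch, not part of the driver: one plausible ordering of the
 * loader and submission entry points over the driver's lifetime, ignoring
 * error handling and which function actually calls which internally (see
 * intel_guc_loader.c and i915_guc_submission.c for the real call sites):
 *
 *	intel_guc_init(dev);			// select and fetch the firmware
 *	i915_guc_submission_init(dev_priv);	// allocate shared GuC objects
 *	intel_guc_setup(dev);			// load the firmware into the GuC
 *	i915_guc_submission_enable(dev_priv);	// route submission via the GuC
 *	...
 *	i915_guc_submission_disable(dev_priv);
 *	i915_guc_submission_fini(dev_priv);
 *	intel_guc_fini(dev);
 */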

#endif