#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
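/*
 * Illustrative sketch (the helper name is ours, not part of this header):
 * the driver-side ring-space calculation reserves I915_RING_FREE_SPACE so
 * the tail can never run right up against the head, per the BSpec note
 * quoted above.
 */
static inline int example_ring_space(int head, int tail, int size)
{
	int space = head - (tail + I915_RING_FREE_SPACE);
	if (space < 0)
		space += size;	/* the free region wraps around the ring end */
	return space;
}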

struct  intel_hw_status_page {
	u32		*page_addr;	/* CPU address of the status page */
	unsigned int	gfx_addr;	/* graphics (GTT) address of the page */
	struct		drm_i915_gem_object *obj;	/* backing GEM object */
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring)  I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

struct intel_ring_hangcheck {
	bool deadlock;
	u32 seqno;
	u32 acthd;
	int score;
	enum intel_ring_hangcheck_action action;
};

struct  intel_ring_buffer {
	const char	*name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
		VECS,
	} id;
#define I915_NUM_RINGS 4
	u32		mmio_base;
	void		__iomem *virtual_start;
	struct		drm_device *dev;
	struct		drm_i915_gem_object *obj;

	u32		head;
	u32		tail;
	int		space;
	int		size;
	int		effective_size;
	struct intel_hw_status_page status_page;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32		last_retired_head;
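
	/*
	 * Consumer-side sketch (illustrative): on request retirement the
	 * driver records
	 *
	 *	ring->last_retired_head = request->tail;
	 *
	 * and a later wait-for-space can then reclaim that region with
	 *
	 *	ring->head = ring->last_retired_head;
	 *	ring->last_retired_head = -1;
	 */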

	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
	u32		trace_irq_seqno;
	u32		sync_seqno[I915_NUM_RINGS-1];
	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
	void		(*irq_put)(struct intel_ring_buffer *ring);

	int		(*init)(struct intel_ring_buffer *ring);

	void		(*write_tail)(struct intel_ring_buffer *ring,
				      u32 value);
	int __must_check (*flush)(struct intel_ring_buffer *ring,
				  u32	invalidate_domains,
				  u32	flush_domains);
	int		(*add_request)(struct intel_ring_buffer *ring);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32		(*get_seqno)(struct intel_ring_buffer *ring,
				     bool lazy_coherency);
	void		(*set_seqno)(struct intel_ring_buffer *ring,
				     u32 seqno);
	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
					       u32 offset, u32 length,
					       unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void		(*cleanup)(struct intel_ring_buffer *ring);
	int		(*sync_to)(struct intel_ring_buffer *ring,
				   struct intel_ring_buffer *to,
				   u32 seqno);

	/* our mbox written by others */
	u32		semaphore_register[I915_NUM_RINGS];
	/* mboxes this ring signals to */
	u32		signal_mbox[I915_NUM_RINGS];

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives.  last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have some not-yet-emitted requests outstanding?
	 */
	struct drm_i915_gem_request *preallocated_lazy_request;
	u32 outstanding_lazy_seqno;
	bool gpu_caches_dirty;
	bool fbc_dirty;

	wait_queue_head_t irq_queue;

	/**
	 * Do an explicit TLB flush before MI_SET_CONTEXT
	 */
	bool itlb_before_ctx_switch;
	struct i915_hw_context *default_context;
	struct i915_hw_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
		volatile u32 *cpu_page;
	} scratch;
};

static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
{
	return ring->obj != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
		      struct intel_ring_buffer *other)
{
	int idx;

	/*
	 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs;
	 * vcs -> 0 = bcs, 1 = vecs, 2 = rcs;
	 * bcs -> 0 = vecs, 1 = rcs, 2 = vcs;
	 * vecs -> 0 = rcs, 1 = vcs, 2 = bcs;
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}
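
/*
 * Usage sketch (illustrative): after ring "to" has been made to wait on a
 * seqno from ring "from", the driver records
 *
 *	from->sync_seqno[intel_ring_sync_index(from, to)] = seqno;
 *
 * so that later, redundant semaphore waits on older seqnos can be skipped.
 */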

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();
	return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_ring_buffer *ring,
			int reg, u32 value)
{
	ring->status_page.page_addr[reg] = value;
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x20
#define I915_GEM_HWS_SCRATCH_INDEX	0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
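
/*
 * Illustrative sketch (the helper name is ours): a ring's get_seqno() hook
 * typically just reads the breadcrumb dword that MI_STORE_DWORD_INDEX
 * wrote into the status page.
 */
static inline u32 example_ring_last_seqno(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}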

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	/* Write one dword at the current tail and advance the software tail. */
	iowrite32(data, ring->virtual_start + ring->tail);
	ring->tail += 4;
}
static inline void intel_ring_advance(struct intel_ring_buffer *ring)
{
	/* Wrap the software tail back into the ring; the hardware tail
	 * register is updated separately (see __intel_ring_advance). */
	ring->tail &= ring->size - 1;
}
void __intel_ring_advance(struct intel_ring_buffer *ring);
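
/*
 * Typical emission pattern, as used throughout the driver (illustrative
 * sketch; MI_NOOP is defined in i915_reg.h and error handling is
 * abbreviated):
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */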

int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
	return ring->tail;
}

static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
{
	BUG_ON(ring->outstanding_lazy_seqno == 0);
	return ring->outstanding_lazy_seqno;
}

static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */