/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__

#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/types.h>

#include "i915_active.h"
#include "i915_drv.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
#include "intel_ring_types.h"
#include "intel_timeline_types.h"
#include "i915_trace.h"

#define CE_TRACE(ce, fmt, ...) do {					\
	const struct intel_context *ce__ = (ce);			\
	ENGINE_TRACE(ce__->engine, "context:%llx " fmt,			\
		     ce__->timeline->fence_context,			\
		     ##__VA_ARGS__);					\
} while (0)

struct i915_gem_ww_ctx;

void intel_context_init(struct intel_context *ce,
			struct intel_engine_cs *engine);
void intel_context_fini(struct intel_context *ce);

void i915_context_module_exit(void);
int i915_context_module_init(void);

struct intel_context *
intel_context_create(struct intel_engine_cs *engine);

int intel_context_alloc_state(struct intel_context *ce);

void intel_context_free(struct intel_context *ce);

int intel_context_reconfigure_sseu(struct intel_context *ce,
				   const struct intel_sseu sseu);

#define PARENT_SCRATCH_SIZE	PAGE_SIZE

static inline bool intel_context_is_child(struct intel_context *ce)
{
	return !!ce->parallel.parent;
}

static inline bool intel_context_is_parent(struct intel_context *ce)
{
	return !!ce->parallel.number_children;
}

static inline bool intel_context_is_pinned(struct intel_context *ce);

static inline struct intel_context *
intel_context_to_parent(struct intel_context *ce)
{
	if (intel_context_is_child(ce)) {
		/*
		 * The parent holds a reference count on the child, so it is
		 * always safe for the parent to access the child; the child,
		 * however, holds a pointer to the parent without a reference.
		 * To keep that safe, the child must only access the parent
		 * pointer while the parent is pinned.
		 */
		GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent));

		return ce->parallel.parent;
	} else {
		return ce;
	}
}

static inline bool intel_context_is_parallel(struct intel_context *ce)
{
	return intel_context_is_child(ce) || intel_context_is_parent(ce);
}

void intel_context_bind_parent_child(struct intel_context *parent,
				     struct intel_context *child);

#define for_each_child(parent, ce)\
	list_for_each_entry(ce, &(parent)->parallel.child_list,\
			    parallel.child_link)
#define for_each_child_safe(parent, ce, cn)\
	list_for_each_entry_safe(ce, cn, &(parent)->parallel.child_list,\
				 parallel.child_link)

/**
 * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
 * @ce: the context
 *
 * Acquire a lock on the pinned status of the HW context, such that the context
 * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
 * intel_context_is_pinned() remains stable.
 */
static inline int intel_context_lock_pinned(struct intel_context *ce)
	__acquires(ce->pin_mutex)
{
	return mutex_lock_interruptible(&ce->pin_mutex);
}

/**
 * intel_context_is_pinned - Reports the 'pinned' status
 * @ce: the context
 *
 * While in use by the GPU, the context, along with its ring and page
 * tables, is pinned into memory and the GTT.
 *
 * Returns: true if the context is currently pinned for use by the GPU.
 */
static inline bool
intel_context_is_pinned(struct intel_context *ce)
{
	return atomic_read(&ce->pin_count);
}

static inline void intel_context_cancel_request(struct intel_context *ce,
						struct i915_request *rq)
{
	GEM_BUG_ON(!ce->ops->cancel_request);
	return ce->ops->cancel_request(ce, rq);
}

/**
 * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
 * @ce: the context
 *
 * Releases the lock earlier acquired by intel_context_lock_pinned().
 */
static inline void intel_context_unlock_pinned(struct intel_context *ce)
	__releases(ce->pin_mutex)
{
	mutex_unlock(&ce->pin_mutex);
}
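
/*
 * Usage sketch (illustrative): stabilise the pinned status while inspecting
 * state that is only valid for a pinned context. inspect_state() is a
 * hypothetical helper.
 *
 *	err = intel_context_lock_pinned(ce);
 *	if (err)
 *		return err;
 *
 *	if (intel_context_is_pinned(ce))
 *		err = inspect_state(ce);
 *
 *	intel_context_unlock_pinned(ce);
 *	return err;
 */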

int __intel_context_do_pin(struct intel_context *ce);
int __intel_context_do_pin_ww(struct intel_context *ce,
			      struct i915_gem_ww_ctx *ww);

static inline bool intel_context_pin_if_active(struct intel_context *ce)
{
	return atomic_inc_not_zero(&ce->pin_count);
}

static inline int intel_context_pin(struct intel_context *ce)
{
	if (likely(intel_context_pin_if_active(ce)))
		return 0;

	return __intel_context_do_pin(ce);
}

static inline int intel_context_pin_ww(struct intel_context *ce,
				       struct i915_gem_ww_ctx *ww)
{
	if (likely(intel_context_pin_if_active(ce)))
		return 0;

	return __intel_context_do_pin_ww(ce, ww);
}

static inline void __intel_context_pin(struct intel_context *ce)
{
	GEM_BUG_ON(!intel_context_is_pinned(ce));
	atomic_inc(&ce->pin_count);
}

void __intel_context_do_unpin(struct intel_context *ce, int sub);

static inline void intel_context_sched_disable_unpin(struct intel_context *ce)
{
	__intel_context_do_unpin(ce, 2);
}

static inline void intel_context_unpin(struct intel_context *ce)
{
	if (!ce->ops->sched_disable) {
		__intel_context_do_unpin(ce, 1);
	} else {
		/*
		 * Move ownership of this pin to the scheduling disable, which
		 * is an async operation. When that operation completes,
		 * intel_context_sched_disable_unpin() above is called,
		 * potentially unpinning the context.
		 */
		while (!atomic_add_unless(&ce->pin_count, -1, 1)) {
			if (atomic_cmpxchg(&ce->pin_count, 1, 2) == 1) {
				ce->ops->sched_disable(ce);
				break;
			}
		}
	}
}
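
/*
 * Usage sketch (illustrative): every successful pin must be balanced by an
 * unpin once the caller is done with the context. submit_work() stands in
 * for whatever the caller does while holding the pin.
 *
 *	err = intel_context_pin(ce);
 *	if (err)
 *		return err;
 *
 *	submit_work(ce);
 *
 *	intel_context_unpin(ce);
 */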

void intel_context_enter_engine(struct intel_context *ce);
void intel_context_exit_engine(struct intel_context *ce);

static inline void intel_context_enter(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	if (!ce->active_count++)
		ce->ops->enter(ce);
}

static inline void intel_context_mark_active(struct intel_context *ce)
{
	lockdep_assert(lockdep_is_held(&ce->timeline->mutex) ||
		       test_bit(CONTEXT_IS_PARKING, &ce->flags));
	++ce->active_count;
}

static inline void intel_context_exit(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	GEM_BUG_ON(!ce->active_count);
	if (!--ce->active_count)
		ce->ops->exit(ce);
}
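
/*
 * Usage sketch (illustrative): intel_context_enter() and intel_context_exit()
 * must be balanced and, as the lockdep annotations above require, called with
 * ce->timeline->mutex held.
 *
 *	mutex_lock(&ce->timeline->mutex);
 *	intel_context_enter(ce);
 *	... submit work tracked against the context ...
 *	intel_context_exit(ce);
 *	mutex_unlock(&ce->timeline->mutex);
 */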

static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
	kref_get(&ce->ref);
	return ce;
}

static inline void intel_context_put(struct intel_context *ce)
{
	kref_put(&ce->ref, ce->ops->destroy);
}

static inline struct intel_timeline *__must_check
intel_context_timeline_lock(struct intel_context *ce)
	__acquires(&ce->timeline->mutex)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	if (intel_context_is_parent(ce))
		err = mutex_lock_interruptible_nested(&tl->mutex, 0);
	else if (intel_context_is_child(ce))
		err = mutex_lock_interruptible_nested(&tl->mutex,
						      ce->parallel.child_index + 1);
	else
		err = mutex_lock_interruptible(&tl->mutex);
	if (err)
		return ERR_PTR(err);

	return tl;
}

static inline void intel_context_timeline_unlock(struct intel_timeline *tl)
	__releases(&tl->mutex)
{
	mutex_unlock(&tl->mutex);
}
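
/*
 * Usage sketch (illustrative): intel_context_timeline_lock() hands back the
 * locked timeline (or an ERR_PTR), and the matching unlock takes that
 * timeline rather than the context.
 *
 *	struct intel_timeline *tl;
 *
 *	tl = intel_context_timeline_lock(ce);
 *	if (IS_ERR(tl))
 *		return PTR_ERR(tl);
 *	... build work against the timeline ...
 *	intel_context_timeline_unlock(tl);
 */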

int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq);

struct i915_request *intel_context_create_request(struct intel_context *ce);

struct i915_request *
intel_context_find_active_request(struct intel_context *ce);

static inline bool intel_context_is_barrier(const struct intel_context *ce)
{
	return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
}

static inline bool intel_context_is_closed(const struct intel_context *ce)
{
	return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
}

static inline bool intel_context_has_inflight(const struct intel_context *ce)
{
	return test_bit(COPS_HAS_INFLIGHT_BIT, &ce->ops->flags);
}

static inline bool intel_context_use_semaphores(const struct intel_context *ce)
{
	return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_set_use_semaphores(struct intel_context *ce)
{
	set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_clear_use_semaphores(struct intel_context *ce)
{
	clear_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline bool intel_context_is_banned(const struct intel_context *ce)
{
	return test_bit(CONTEXT_BANNED, &ce->flags);
}

static inline bool intel_context_set_banned(struct intel_context *ce)
{
	return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
}

static inline bool intel_context_ban(struct intel_context *ce,
				     struct i915_request *rq)
{
	bool ret = intel_context_set_banned(ce);

	trace_intel_context_ban(ce);
	if (ce->ops->ban)
		ce->ops->ban(ce, rq);

	return ret;
}

static inline bool
intel_context_force_single_submission(const struct intel_context *ce)
{
	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline void
intel_context_set_single_submission(struct intel_context *ce)
{
	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline bool
intel_context_nopreempt(const struct intel_context *ce)
{
	return test_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_set_nopreempt(struct intel_context *ce)
{
	set_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_clear_nopreempt(struct intel_context *ce)
{
	clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

u64 intel_context_get_total_runtime_ns(const struct intel_context *ce);
u64 intel_context_get_avg_runtime_ns(struct intel_context *ce);

static inline u64 intel_context_clock(void)
{
	/* As we mix CS cycles with CPU clocks, use the raw monotonic clock. */
	return ktime_get_raw_fast_ns();
}

#endif /* __INTEL_CONTEXT_H__ */