/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__

#include <linux/lockdep.h>

#include "intel_context_types.h"
#include "intel_engine_types.h"

struct intel_context *intel_context_alloc(void);
void intel_context_free(struct intel_context *ce);

void intel_context_init(struct intel_context *ce,
			struct i915_gem_context *ctx,
			struct intel_engine_cs *engine);

/**
 * intel_context_lookup - Find the matching HW context for this (ctx, engine)
 * @ctx: the parent GEM context
 * @engine: the target HW engine
 *
 * May return NULL if the HW context hasn't been instantiated (i.e. unused).
 */
struct intel_context *
intel_context_lookup(struct i915_gem_context *ctx,
		     struct intel_engine_cs *engine);
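
/*
 * Illustrative usage (caller names assumed, not part of this header):
 * probe for an existing HW context without instantiating one.
 *
 *	struct intel_context *ce;
 *
 *	ce = intel_context_lookup(ctx, engine);
 *	if (!ce)
 *		return -ENOENT;
 */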

/**
 * intel_context_pin_lock - Stabilises the 'pinned' status of the HW context
 * @ctx: the parent GEM context
 * @engine: the target HW engine
 *
 * Acquire a lock on the pinned status of the HW context, such that the context
 * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
 * intel_context_is_pinned() remains stable.
 */
struct intel_context *
intel_context_pin_lock(struct i915_gem_context *ctx,
		       struct intel_engine_cs *engine);

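/*
 * Unlocked peek at the pinned status: the result is only stable while
 * intel_context_pin_lock() is held (see above); otherwise treat it as
 * advisory.
 */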
static inline bool
intel_context_is_pinned(struct intel_context *ce)
{
	return atomic_read(&ce->pin_count);
}

void intel_context_pin_unlock(struct intel_context *ce);
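
/*
 * Illustrative usage (assumed caller; the ERR_PTR return convention is an
 * assumption based on similar i915 interfaces): hold the pin lock so the
 * pinned status cannot change while it is inspected.
 *
 *	struct intel_context *ce;
 *
 *	ce = intel_context_pin_lock(ctx, engine);
 *	if (IS_ERR(ce))
 *		return PTR_ERR(ce);
 *
 *	if (!intel_context_is_pinned(ce))
 *		... the context image may be modified directly ...
 *
 *	intel_context_pin_unlock(ce);
 */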

struct intel_context *
__intel_context_insert(struct i915_gem_context *ctx,
		       struct intel_engine_cs *engine,
		       struct intel_context *ce);
void
__intel_context_remove(struct intel_context *ce);

struct intel_context *
intel_context_instance(struct i915_gem_context *ctx,
		       struct intel_engine_cs *engine);

int __intel_context_do_pin(struct intel_context *ce);

static inline int intel_context_pin(struct intel_context *ce)
{
	if (likely(atomic_inc_not_zero(&ce->pin_count)))
		return 0;

	return __intel_context_do_pin(ce);
}
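
/*
 * Illustrative flow (assumed caller; intel_context_instance() returning an
 * ERR_PTR on failure is an assumption): obtain the (ctx, engine) context
 * and pin it around use. intel_context_pin() takes the lockless fast path
 * via atomic_inc_not_zero() when already pinned, and falls back to
 * __intel_context_do_pin() for the first pin.
 *
 *	struct intel_context *ce;
 *	int err;
 *
 *	ce = intel_context_instance(ctx, engine);
 *	if (IS_ERR(ce))
 *		return PTR_ERR(ce);
 *
 *	err = intel_context_pin(ce);
 *	if (err)
 *		return err;
 *
 *	... build and submit requests on ce ...
 *
 *	intel_context_unpin(ce);
 */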

static inline void __intel_context_pin(struct intel_context *ce)
{
	GEM_BUG_ON(!intel_context_is_pinned(ce));
	atomic_inc(&ce->pin_count);
}

void intel_context_unpin(struct intel_context *ce);

void intel_context_enter_engine(struct intel_context *ce);
void intel_context_exit_engine(struct intel_context *ce);

static inline void intel_context_enter(struct intel_context *ce)
{
	if (!ce->active_count++)
		ce->ops->enter(ce);
}

static inline void intel_context_mark_active(struct intel_context *ce)
{
	++ce->active_count;
}

static inline void intel_context_exit(struct intel_context *ce)
{
	GEM_BUG_ON(!ce->active_count);
	if (!--ce->active_count)
		ce->ops->exit(ce);
}
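
/*
 * Sketch of the intended pairing (assumed caller): enter/exit bracket the
 * period in which the context has requests in flight; only the first
 * enter and the last exit invoke the ce->ops hooks. ce->active_count is
 * updated non-atomically, so callers are assumed to provide their own
 * serialisation.
 *
 *	intel_context_enter(ce);	(calls ce->ops->enter() on 0 -> 1)
 *	... requests in flight ...
 *	intel_context_exit(ce);		(calls ce->ops->exit() on 1 -> 0)
 */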

static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
	kref_get(&ce->ref);
	return ce;
}

static inline void intel_context_put(struct intel_context *ce)
{
	kref_put(&ce->ref, ce->ops->destroy);
}
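
/*
 * Illustrative refcounting (assumed caller): take a reference for each
 * long-lived pointer to the context; the final intel_context_put()
 * releases it via ce->ops->destroy.
 *
 *	struct intel_context *ce = intel_context_get(other_ce);
 *	... use ce independently of other_ce's holder ...
 *	intel_context_put(ce);
 */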

static inline void intel_context_timeline_lock(struct intel_context *ce)
	__acquires(&ce->ring->timeline->mutex)
{
	mutex_lock(&ce->ring->timeline->mutex);
}

static inline void intel_context_timeline_unlock(struct intel_context *ce)
	__releases(&ce->ring->timeline->mutex)
{
	mutex_unlock(&ce->ring->timeline->mutex);
}
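
/*
 * Illustrative usage (assumed caller): requests on this context are
 * ordered on ce->ring->timeline, so hold the timeline mutex across
 * request construction. The __acquires/__releases annotations above
 * keep sparse's lock-context checking accurate.
 *
 *	intel_context_timeline_lock(ce);
 *	... allocate and queue a request on ce ...
 *	intel_context_timeline_unlock(ce);
 */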

#endif /* __INTEL_CONTEXT_H__ */