/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__

#include <linux/lockdep.h>

#include "intel_context_types.h"
#include "intel_engine_types.h"

struct intel_context *intel_context_alloc(void);
void intel_context_free(struct intel_context *ce);

void intel_context_init(struct intel_context *ce,
			struct i915_gem_context *ctx,
			struct intel_engine_cs *engine);
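
/*
 * Usage sketch (illustrative, not lifted from the driver; assumes
 * intel_context_alloc() returns NULL on allocation failure). Once
 * initialised, the context is reference counted, so it is normally
 * released via intel_context_put() below rather than freed directly:
 *
 *	struct intel_context *ce;
 *
 *	ce = intel_context_alloc();
 *	if (!ce)
 *		return -ENOMEM;
 *	intel_context_init(ce, ctx, engine);
 */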

/**
 * intel_context_lookup - Find the matching HW context for this (ctx, engine)
 * @ctx - the parent GEM context
 * @engine - the target HW engine
 *
 * May return NULL if the HW context hasn't been instantiated (i.e. unused).
 */
struct intel_context *
intel_context_lookup(struct i915_gem_context *ctx,
		     struct intel_engine_cs *engine);
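
/*
 * Usage sketch (illustrative): the NULL return must be handled, since the
 * HW context is created lazily and may not exist yet:
 *
 *	struct intel_context *ce;
 *
 *	ce = intel_context_lookup(ctx, engine);
 *	if (!ce)
 *		return 0;
 */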

/**
 * intel_context_pin_lock - Stabilises the 'pinned' status of the HW context
 * @ctx - the parent GEM context
 * @engine - the target HW engine
 *
 * Acquire a lock on the pinned status of the HW context, such that the context
 * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
 * intel_context_is_pinned() remains stable.
 */
struct intel_context *
intel_context_pin_lock(struct i915_gem_context *ctx,
		       struct intel_engine_cs *engine);

/*
 * An unlocked peek at the pin status; only stable while
 * intel_context_pin_lock() is held.
 */
static inline bool
intel_context_is_pinned(struct intel_context *ce)
{
	return atomic_read(&ce->pin_count);
}

static inline void intel_context_pin_unlock(struct intel_context *ce)
__releases(ce->pin_mutex)
{
	mutex_unlock(&ce->pin_mutex);
}
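
/*
 * Usage sketch (illustrative; assumes intel_context_pin_lock() reports
 * failure with an ERR_PTR, the usual i915 convention):
 *
 *	ce = intel_context_pin_lock(ctx, engine);
 *	if (IS_ERR(ce))
 *		return PTR_ERR(ce);
 *
 *	... intel_context_is_pinned(ce) is now stable ...
 *
 *	intel_context_pin_unlock(ce);
 */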

struct intel_context *
__intel_context_insert(struct i915_gem_context *ctx,
		       struct intel_engine_cs *engine,
		       struct intel_context *ce);
void
__intel_context_remove(struct intel_context *ce);

struct intel_context *
intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine);

/* Take an extra pin reference; the context must already be pinned. */
static inline void __intel_context_pin(struct intel_context *ce)
{
	GEM_BUG_ON(!intel_context_is_pinned(ce));
	atomic_inc(&ce->pin_count);
}

void intel_context_unpin(struct intel_context *ce);
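
/*
 * Usage sketch (illustrative; assumes intel_context_pin() returns an
 * ERR_PTR on failure). Every successful pin must be balanced by an unpin:
 *
 *	ce = intel_context_pin(ctx, engine);
 *	if (IS_ERR(ce))
 *		return PTR_ERR(ce);
 *
 *	... the context is bound to the GPU ...
 *
 *	intel_context_unpin(ce);
 */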

void intel_context_enter_engine(struct intel_context *ce);
void intel_context_exit_engine(struct intel_context *ce);

static inline void intel_context_enter(struct intel_context *ce)
{
	if (!ce->active_count++)
		ce->ops->enter(ce);
}

/* Take an extra active reference without invoking the enter hook. */
static inline void intel_context_mark_active(struct intel_context *ce)
{
	++ce->active_count;
}

static inline void intel_context_exit(struct intel_context *ce)
{
	GEM_BUG_ON(!ce->active_count);
	if (!--ce->active_count)
		ce->ops->exit(ce);
}
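
/*
 * Usage sketch (illustrative): enter/exit must balance, bracketing the
 * period in which the context has work outstanding on the hardware; the
 * first enter invokes ce->ops->enter() and the last exit ce->ops->exit():
 *
 *	intel_context_enter(ce);
 *	... queue work to the context ...
 *	intel_context_exit(ce);
 */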

static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
	kref_get(&ce->ref);
	return ce;
}

static inline void intel_context_put(struct intel_context *ce)
{
	kref_put(&ce->ref, ce->ops->destroy);
}
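
/*
 * Usage sketch (illustrative): the standard kref pattern, where the final
 * intel_context_put() invokes ce->ops->destroy():
 *
 *	ce = intel_context_get(ce);
 *	... use ce without fear of it disappearing ...
 *	intel_context_put(ce);
 */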

static inline void intel_context_timeline_lock(struct intel_context *ce)
	__acquires(&ce->ring->timeline->mutex)
{
	mutex_lock(&ce->ring->timeline->mutex);
}

static inline void intel_context_timeline_unlock(struct intel_context *ce)
	__releases(&ce->ring->timeline->mutex)
{
	mutex_unlock(&ce->ring->timeline->mutex);
}
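
/*
 * Usage sketch (illustrative): serialise access to the context's ring
 * timeline, e.g. around request construction:
 *
 *	intel_context_timeline_lock(ce);
 *	... build a request on ce->ring->timeline ...
 *	intel_context_timeline_unlock(ce);
 */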

#endif /* __INTEL_CONTEXT_H__ */