/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"
#include "i915_syncmap.h"

/*
 * __intel_timeline_init - initialise one per-engine timeline slot
 * @tl: the timeline to initialise
 * @parent: the i915_gem_timeline that owns @tl
 * @context: dma-fence context id assigned to fences on this timeline
 * @lockclass: lockdep class key shared by timelines created together
 * @lockname: lockdep name reported for tl->lock
 */
static void __intel_timeline_init(struct intel_timeline *tl,
				  struct i915_gem_timeline *parent,
				  u64 context,
				  struct lock_class_key *lockclass,
				  const char *lockname)
{
	tl->fence_context = context;
	tl->common = parent;
#ifdef CONFIG_DEBUG_SPINLOCK
	/*
	 * With spinlock debugging enabled, register the caller-supplied
	 * lockdep class/name on the underlying raw lock so different
	 * timeline families are tracked as distinct lock classes.
	 */
	__raw_spin_lock_init(&tl->lock.rlock, lockname, lockclass);
#else
	spin_lock_init(&tl->lock);
#endif
	init_request_active(&tl->last_request, NULL);
	INIT_LIST_HEAD(&tl->requests);
	i915_syncmap_init(&tl->sync);
}

/*
 * __intel_timeline_fini - release the resources of one per-engine timeline
 * @tl: the timeline to tear down
 *
 * The caller must have retired all requests first: the request list is
 * asserted empty before the syncmap is freed.
 */
static void __intel_timeline_fini(struct intel_timeline *tl)
{
	GEM_BUG_ON(!list_empty(&tl->requests));

	i915_syncmap_free(&tl->sync);
}

static int __i915_gem_timeline_init(struct drm_i915_private *i915,
				    struct i915_gem_timeline *timeline,
				    const char *name,
				    struct lock_class_key *lockclass,
				    const char *lockname)
58 59 60 61 62 63
{
	unsigned int i;
	u64 fences;

	lockdep_assert_held(&i915->drm.struct_mutex);

64 65 66 67 68 69 70 71
	/*
	 * Ideally we want a set of engines on a single leaf as we expect
	 * to mostly be tracking synchronisation between engines. It is not
	 * a huge issue if this is not the case, but we may want to mitigate
	 * any page crossing penalties if they become an issue.
	 */
	BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);

72 73 74 75 76 77 78 79 80
	timeline->i915 = i915;
	timeline->name = kstrdup(name ?: "[kernel]", GFP_KERNEL);
	if (!timeline->name)
		return -ENOMEM;

	list_add(&timeline->link, &i915->gt.timelines);

	/* Called during early_init before we know how many engines there are */
	fences = dma_fence_context_alloc(ARRAY_SIZE(timeline->engine));
81 82 83 84
	for (i = 0; i < ARRAY_SIZE(timeline->engine); i++)
		__intel_timeline_init(&timeline->engine[i],
				      timeline, fences++,
				      lockclass, lockname);
85 86 87 88

	return 0;
}

/*
 * i915_gem_timeline_init - create an ordinary (per-context) GEM timeline
 * @i915: the drm_i915_private device
 * @timeline: the timeline to initialise
 * @name: display name for the timeline; NULL maps to "[kernel]"
 *
 * All timelines created here share one static lockdep class, distinct
 * from the global timeline's class (see i915_gem_timeline_init__global),
 * so lockdep can validate nesting between the two families.
 */
int i915_gem_timeline_init(struct drm_i915_private *i915,
			   struct i915_gem_timeline *timeline,
			   const char *name)
{
	static struct lock_class_key class;

	return __i915_gem_timeline_init(i915, timeline, name,
					&class, "&timeline->lock");
}

/*
 * i915_gem_timeline_init__global - create the device-wide "[execution]"
 * timeline stored at i915->gt.global_timeline
 * @i915: the drm_i915_private device
 *
 * Uses its own static lockdep class (separate from per-context
 * timelines) so the two lock families can be nested under lockdep.
 */
int i915_gem_timeline_init__global(struct drm_i915_private *i915)
{
	static struct lock_class_key class;

	return __i915_gem_timeline_init(i915,
					&i915->gt.global_timeline,
					"[execution]",
					&class, "&global_timeline->lock");
}

/**
 * i915_gem_timelines_park - called when the driver idles
 * @i915: the drm_i915_private device
 *
 * When the driver is completely idle, we know that all of our sync points
 * have been signaled and our tracking is then entirely redundant. Any request
 * to wait upon an older sync point will be completed instantly as we know
 * the fence is signaled and therefore we will not even look them up in the
 * sync point map.
 */
void i915_gem_timelines_park(struct drm_i915_private *i915)
120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140
{
	struct i915_gem_timeline *timeline;
	int i;

	lockdep_assert_held(&i915->drm.struct_mutex);

	list_for_each_entry(timeline, &i915->gt.timelines, link) {
		for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) {
			struct intel_timeline *tl = &timeline->engine[i];

			/*
			 * All known fences are completed so we can scrap
			 * the current sync point tracking and start afresh,
			 * any attempt to wait upon a previous sync point
			 * will be skipped as the fence was signaled.
			 */
			i915_syncmap_free(&tl->sync);
		}
	}
}

void i915_gem_timeline_fini(struct i915_gem_timeline *timeline)
142
{
143
	int i;
144

145 146
	lockdep_assert_held(&timeline->i915->drm.struct_mutex);

147 148
	for (i = 0; i < ARRAY_SIZE(timeline->engine); i++)
		__intel_timeline_fini(&timeline->engine[i]);
149 150 151

	list_del(&timeline->link);
	kfree(timeline->name);
152
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_timeline.c"
#include "selftests/i915_gem_timeline.c"
#endif