/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "mock_engine.h"
#include "mock_request.h"

28 29 30 31 32
struct mock_ring {
	struct intel_ring base;
	struct i915_timeline timeline;
};

33
static struct mock_request *first_request(struct mock_engine *engine)
34
{
35 36 37 38 39
	return list_first_entry_or_null(&engine->hw_queue,
					struct mock_request,
					link);
}

40 41 42 43 44 45 46
static void advance(struct mock_engine *engine,
		    struct mock_request *request)
{
	list_del_init(&request->link);
	mock_seqno_advance(&engine->base, request->base.global_seqno);
}

47
static void hw_delay_complete(struct timer_list *t)
48
{
49
	struct mock_engine *engine = from_timer(engine, t, hw_delay);
50 51 52 53
	struct mock_request *request;

	spin_lock(&engine->hw_lock);

54
	/* Timer fired, first request is complete */
55 56
	request = first_request(engine);
	if (request)
57 58 59 60 61 62 63 64 65 66 67 68 69 70
		advance(engine, request);

	/*
	 * Also immediately signal any subsequent 0-delay requests, but
	 * requeue the timer for the next delayed request.
	 */
	while ((request = first_request(engine))) {
		if (request->delay) {
			mod_timer(&engine->hw_delay, jiffies + request->delay);
			break;
		}

		advance(engine, request);
	}
71 72 73 74

	spin_unlock(&engine->hw_lock);
}

75
static void mock_context_unpin(struct intel_context *ce)
76
{
77 78
	i915_gem_context_put(ce->gem_context);
}
79

80 81 82
static void mock_context_destroy(struct intel_context *ce)
{
	GEM_BUG_ON(ce->pin_count);
83 84
}

85 86 87 88 89 90 91 92
static const struct intel_context_ops mock_context_ops = {
	.unpin = mock_context_unpin,
	.destroy = mock_context_destroy,
};

static struct intel_context *
mock_context_pin(struct intel_engine_cs *engine,
		 struct i915_gem_context *ctx)
93
{
94 95
	struct intel_context *ce = to_intel_context(ctx, engine);

96 97 98 99 100 101 102
	if (!ce->pin_count++) {
		i915_gem_context_get(ctx);
		ce->ring = engine->buffer;
		ce->ops = &mock_context_ops;
	}

	return ce;
103 104
}

105
static int mock_request_alloc(struct i915_request *request)
106 107 108 109 110 111 112 113 114
{
	struct mock_request *mock = container_of(request, typeof(*mock), base);

	INIT_LIST_HEAD(&mock->link);
	mock->delay = 0;

	return 0;
}

/* No hw behind the mock, so there is nothing to flush. */
static int mock_emit_flush(struct i915_request *request,
			   unsigned int flags)
{
	return 0;
}

121
static void mock_emit_breadcrumb(struct i915_request *request,
122 123 124 125
				 u32 *flags)
{
}

126
static void mock_submit_request(struct i915_request *request)
127 128 129 130 131
{
	struct mock_request *mock = container_of(request, typeof(*mock), base);
	struct mock_engine *engine =
		container_of(request->engine, typeof(*engine), base);

132
	i915_request_submit(request);
133 134 135 136
	GEM_BUG_ON(!request->global_seqno);

	spin_lock_irq(&engine->hw_lock);
	list_add_tail(&mock->link, &engine->hw_queue);
137 138 139 140 141 142
	if (mock->link.prev == &engine->hw_queue) {
		if (mock->delay)
			mod_timer(&engine->hw_delay, jiffies + mock->delay);
		else
			advance(engine, mock);
	}
143 144 145 146 147
	spin_unlock_irq(&engine->hw_lock);
}

static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
{
148
	const unsigned long sz = PAGE_SIZE / 2;
149
	struct mock_ring *ring;
150 151 152 153 154

	ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
	if (!ring)
		return NULL;

155
	i915_timeline_init(engine->i915, &ring->timeline, engine->name);
156

157 158 159 160
	ring->base.size = sz;
	ring->base.effective_size = sz;
	ring->base.vaddr = (void *)(ring + 1);
	ring->base.timeline = &ring->timeline;
161

162 163
	INIT_LIST_HEAD(&ring->base.request_list);
	intel_ring_update_space(&ring->base);
164

165
	return &ring->base;
166 167
}

168
static void mock_ring_free(struct intel_ring *base)
169
{
170 171 172
	struct mock_ring *ring = container_of(base, typeof(*ring), base);

	i915_timeline_fini(&ring->timeline);
173 174 175
	kfree(ring);
}

176
struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
177 178
				    const char *name,
				    int id)
179 180
{
	struct mock_engine *engine;
181 182

	GEM_BUG_ON(id >= I915_NUM_ENGINES);
183 184 185 186 187

	engine = kzalloc(sizeof(*engine) + PAGE_SIZE, GFP_KERNEL);
	if (!engine)
		return NULL;

188 189
	/* minimal engine setup for requests */
	engine->base.i915 = i915;
190
	snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
191
	engine->base.id = id;
192
	engine->base.status_page.page_addr = (void *)(engine + 1);
193

194 195 196 197 198 199
	engine->base.context_pin = mock_context_pin;
	engine->base.request_alloc = mock_request_alloc;
	engine->base.emit_flush = mock_emit_flush;
	engine->base.emit_breadcrumb = mock_emit_breadcrumb;
	engine->base.submit_request = mock_submit_request;

200
	i915_timeline_init(i915, &engine->base.timeline, engine->base.name);
201
	i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE);
202

203 204 205 206
	intel_engine_init_breadcrumbs(&engine->base);

	/* fake hw queue */
	spin_lock_init(&engine->hw_lock);
207
	timer_setup(&engine->hw_delay, hw_delay_complete, 0);
208 209
	INIT_LIST_HEAD(&engine->hw_queue);

210 211 212 213
	engine->base.buffer = mock_ring(&engine->base);
	if (!engine->base.buffer)
		goto err_breadcrumbs;

214 215 216
	if (IS_ERR(intel_context_pin(i915->kernel_context, &engine->base)))
		goto err_ring;

217
	return &engine->base;
218

219 220
err_ring:
	mock_ring_free(engine->base.buffer);
221 222
err_breadcrumbs:
	intel_engine_fini_breadcrumbs(&engine->base);
223
	i915_timeline_fini(&engine->base.timeline);
224 225
	kfree(engine);
	return NULL;
226 227 228 229
}

void mock_engine_flush(struct intel_engine_cs *engine)
{
230 231 232 233 234 235 236 237 238 239 240 241
	struct mock_engine *mock =
		container_of(engine, typeof(*mock), base);
	struct mock_request *request, *rn;

	del_timer_sync(&mock->hw_delay);

	spin_lock_irq(&mock->hw_lock);
	list_for_each_entry_safe(request, rn, &mock->hw_queue, link) {
		list_del_init(&request->link);
		mock_seqno_advance(&mock->base, request->base.global_seqno);
	}
	spin_unlock_irq(&mock->hw_lock);
242 243 244 245 246 247
}

/* Reset the engine's breadcrumb seqno in the status page back to zero. */
void mock_engine_reset(struct intel_engine_cs *engine)
{
	intel_write_status_page(engine, I915_GEM_HWS_INDEX, 0);
}
248 249 250 251 252

void mock_engine_free(struct intel_engine_cs *engine)
{
	struct mock_engine *mock =
		container_of(engine, typeof(*mock), base);
253
	struct intel_context *ce;
254 255 256

	GEM_BUG_ON(timer_pending(&mock->hw_delay));

257 258 259
	ce = fetch_and_zero(&engine->last_retired_context);
	if (ce)
		intel_context_unpin(ce);
260

261 262
	__intel_context_unpin(engine->i915->kernel_context, engine);

263 264
	mock_ring_free(engine->buffer);

265
	intel_engine_fini_breadcrumbs(engine);
266
	i915_timeline_fini(&engine->timeline);
267 268 269

	kfree(engine);
}