/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"
#include "i915_gem_batch_pool.h"

/**
 * DOC: batch pool
 *
 * In order to submit batch buffers as 'secure', the software command parser
 * must ensure that a batch buffer cannot be modified after parsing. It does
 * this by copying the user provided batch buffer contents to a kernel owned
 * buffer from which the hardware will actually execute, and by carefully
 * managing the address space bindings for such buffers.
 *
 * The batch pool framework provides a mechanism for the driver to manage a
 * set of scratch buffers to use for this purpose. The framework can be
 * extended to support other use cases should they arise.
 */
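
/*
 * A minimal usage sketch (illustrative only, not taken from any caller in
 * this file; "engine" and "batch_len" below are placeholders, and the
 * struct_mutex must be held around the get/fini calls as documented below):
 *
 *	struct i915_gem_batch_pool pool;
 *	struct drm_i915_gem_object *shadow;
 *
 *	i915_gem_batch_pool_init(engine, &pool);
 *
 *	shadow = i915_gem_batch_pool_get(&pool, batch_len);
 *	if (IS_ERR(shadow))
 *		return PTR_ERR(shadow);
 *	... copy and parse the user batch into the shadow buffer ...
 *	i915_gem_object_unpin_pages(shadow);
 *
 *	i915_gem_batch_pool_fini(&pool);
 */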

/**
 * i915_gem_batch_pool_init() - initialize a batch buffer pool
 * @engine: the associated request submission engine
 * @pool: the batch buffer pool
 */
void i915_gem_batch_pool_init(struct intel_engine_cs *engine,
			      struct i915_gem_batch_pool *pool)
{
	int n;

	pool->engine = engine;

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		INIT_LIST_HEAD(&pool->cache_list[n]);
}

/**
 * i915_gem_batch_pool_fini() - clean up a batch buffer pool
 * @pool: the pool to clean up
 *
 * Note: Callers must hold the struct_mutex.
 */
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
{
	int n;

	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		struct drm_i915_gem_object *obj, *next;

		list_for_each_entry_safe(obj, next,
					 &pool->cache_list[n],
					 batch_pool_link)
			__i915_gem_object_release_unless_active(obj);

		INIT_LIST_HEAD(&pool->cache_list[n]);
	}
}

/**
 * i915_gem_batch_pool_get() - allocate a buffer from the pool
 * @pool: the batch buffer pool
 * @size: the minimum desired size of the returned buffer
 *
 * Returns an inactive buffer from @pool with at least @size bytes,
 * with the pages pinned. The caller must i915_gem_object_unpin_pages()
 * on the returned object.
 *
 * Note: Callers must hold the struct_mutex
 *
 * Return: the buffer object or an error pointer
 */
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
			size_t size)
{
	struct drm_i915_gem_object *obj = NULL;
	struct drm_i915_gem_object *tmp, *next;
	struct list_head *list;
	int n;

	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

	/* Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
	 */
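	/* A worked example (illustrative, assuming 4KiB pages): a 12KiB
	 * request is 3 pages, so fls(3) - 1 = 1 selects the 2-page bucket,
	 * while a 64KiB (16 page) request is clamped into the final bucket.
	 */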
	n = fls(size >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;
	list = &pool->cache_list[n];

	list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
		/* The batches are strictly LRU ordered */
		if (!i915_gem_active_is_idle(&tmp->last_read[pool->engine->id],
					     &tmp->base.dev->struct_mutex))
			break;

		/* While we're looping, do some clean up */
		if (tmp->madv == __I915_MADV_PURGED) {
			list_del(&tmp->batch_pool_link);
			i915_gem_object_put(tmp);
			continue;
		}

		if (tmp->base.size >= size) {
			obj = tmp;
			break;
		}
	}

	if (obj == NULL) {
		int ret;

		obj = i915_gem_object_create(&pool->engine->i915->drm, size);
		if (IS_ERR(obj))
			return obj;

		ret = i915_gem_object_get_pages(obj);
		if (ret)
			return ERR_PTR(ret);

		obj->madv = I915_MADV_DONTNEED;
	}

	list_move_tail(&obj->batch_pool_link, list);
	i915_gem_object_pin_pages(obj);
	return obj;
}