/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"

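/* Helper for the eviction scan below: unpinned VMAs are added to the
 * unwind list and offered to the drm_mm scanner, which reports true once
 * it has accumulated enough space to satisfy the request.  Pinned objects
 * cannot be evicted and are skipped.
 */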
static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
	if (vma->obj->pin_count)
		return false;

	list_add(&vma->exec_list, unwind);
	return drm_mm_scan_add_block(&vma->node);
}

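/* Scan @vm for a contiguous hole of at least @min_size bytes with the
 * requested @alignment and @cache_level, evicting just enough overlapping
 * VMAs to create one.  With @mappable, the search is limited to the
 * CPU-mappable end of the global GTT; with @nonblocking, only already
 * idle (inactive) objects are considered.  Returns 0 on success, -ENOSPC
 * if no suitable set of objects was found, or the first error raised by
 * i915_vma_unbind().  A caller is expected to retry its allocation after
 * a successful eviction.
 */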
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
			 int min_size, unsigned alignment, unsigned cache_level,
			 bool mappable, bool nonblocking)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head eviction_list, unwind_list;
	struct i915_vma *vma;
	int ret = 0;

	trace_i915_gem_evict(dev, min_size, alignment, mappable);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the (per
	 * ring) active list that do not have an outstanding flush. Once the
	 * hardware reports completion (the seqno is updated after the
	 * batchbuffer has been finished) the clean buffer objects would
	 * be retired to the inactive list. Any dirty objects would be added
	 * to the tail of the flushing list. So after processing the clean
	 * active objects we need to emit a MI_FLUSH to retire the flushing
	 * list, hence the retirement order of the flushing list is in
	 * advance of the dirty objects on the active lists.
	 *
	 * The retirement sequence is thus:
	 *   1. Inactive objects (already retired)
	 *   2. Clean active objects
	 *   3. Flushing list
	 *   4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */

	INIT_LIST_HEAD(&unwind_list);
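	/* This starts a drm_mm eviction scan: every node passed to
	 * drm_mm_scan_add_block() must be handed back through
	 * drm_mm_scan_remove_block() (in both the unwind and the found
	 * paths below) before the drm_mm may be modified again.
	 */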
	if (mappable) {
		BUG_ON(!i915_is_ggtt(vm));
		drm_mm_init_scan_with_range(&vm->mm, min_size,
					    alignment, cache_level, 0,
					    dev_priv->gtt.mappable_end);
	} else
		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);

	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

	if (nonblocking)
		goto none;

	/* Now merge in the soon-to-be-expired objects... */
	list_for_each_entry(vma, &vm->active_list, mm_list) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

none:
	/* Nothing found, clean up and bail out! */
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
		ret = drm_mm_scan_remove_block(&vma->node);
		BUG_ON(ret);

		list_del_init(&vma->exec_list);
	}

	/* We expect the caller to unpin, evict all and try again, or give up.
	 * So calling i915_gem_evict_everything() is unnecessary.
	 */
	return -ENOSPC;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store to-be-evicted objects on a
	 * temporary list. */
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
		if (drm_mm_scan_remove_block(&vma->node)) {
			list_move(&vma->exec_list, &eviction_list);
			drm_gem_object_reference(&vma->obj->base);
			continue;
		}
		list_del_init(&vma->exec_list);
	}

	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		vma = list_first_entry(&eviction_list,
				       struct i915_vma,
				       exec_list);
		if (ret == 0)
			ret = i915_vma_unbind(vma);

		list_del_init(&vma->exec_list);
		drm_gem_object_unreference(&vma->obj->base);
	}

	return ret;
}

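/* Throw out every unpinned object in every address space: idle the GPU so
 * all outstanding work is flushed, retire the completed requests, and then
 * unbind whatever is left on the inactive lists.  Returns -ENOSPC if there
 * was nothing to evict in the first place.
 */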
int
i915_gem_evict_everything(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct i915_address_space *vm;
	struct i915_vma *vma, *next;
	bool lists_empty = true;
	int ret;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
		lists_empty = (list_empty(&vm->inactive_list) &&
			       list_empty(&vm->active_list));
		/* Stop at the first VM with objects; a later, empty VM
		 * must not overwrite the result. */
		if (!lists_empty)
			break;
	}

	if (lists_empty)
		return -ENOSPC;

	trace_i915_gem_evict_everything(dev);

	/* The gpu_idle will flush everything in the write domain to the
	 * active list. Then we must move everything off the active list
	 * with retire requests.
	 */
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	i915_gem_retire_requests(dev);

	/* Having flushed everything, unbind() should never raise an error */
	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
		list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
			if (vma->obj->pin_count == 0)
				WARN_ON(i915_vma_unbind(vma));
	}

	return 0;
}