/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

/*
 * Release all reservations held on @list; buffers that were taken off
 * the LRU lists are put back. Caller must hold the global LRU spinlock.
 */
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		entry->reserved = false;
		if (entry->removed) {
			ttm_bo_add_to_lru(bo);
			entry->removed = false;
		}
		__ttm_bo_unreserve(bo);
	}
}

/*
 * Take every reserved buffer on @list off the LRU lists, recording in
 * put_count how many list references must be dropped later. Caller
 * must hold the global LRU spinlock.
 */
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}

/*
 * Drop the LRU list references recorded by ttm_eu_del_from_lru_locked(),
 * after the LRU spinlock has been released.
 */
static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}

/*
 * Undo a successful call to ttm_eu_reserve_buffers(): unreserve all
 * buffers on @list and release the ww_acquire ticket, if any.
 */
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list);
	if (ticket)
		ww_acquire_fini(ticket);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * return -EBUSY so the caller can wait for that buffer to become free
 * for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 *
 * A usage sketch follows the function below.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);
retry:
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		/* already slowpath reserved? */
		if (entry->reserved)
			continue;

		ret = __ttm_bo_reserve(bo, true, (ticket == NULL), true,
				       ticket);

		if (ret == -EDEADLK) {
			/* uh oh, we lost out, drop every reservation and try
			 * to only reserve this buffer, then start over if
			 * this succeeds.
			 */
			BUG_ON(ticket == NULL);
			spin_lock(&glob->lru_lock);
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
							       ticket);
			if (unlikely(ret != 0)) {
				if (ret == -EINTR)
					ret = -ERESTARTSYS;
				goto err_fini;
			}

			entry->reserved = true;
			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
				ret = -EBUSY;
				goto err;
			}
			goto retry;
		} else if (ret)
			goto err;

		entry->reserved = true;
		/* Buffers with pending CPU access can't be validated. */
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			ret = -EBUSY;
			goto err;
		}
	}

	if (ticket)
		ww_acquire_done(ticket);
	spin_lock(&glob->lru_lock);
	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);
	return 0;

err:
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);
err_fini:
	if (ticket) {
		ww_acquire_done(ticket);
		ww_acquire_fini(ticket);
	}
	return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
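
/*
 * Minimal usage sketch (an illustrative assumption, not code from this
 * file): a driver-side caller reserves its whole validation list up
 * front and, if a later step fails, explicitly backs off so no buffer
 * is left reserved. On a reservation error, ttm_eu_reserve_buffers()
 * itself leaves nothing reserved and has already released the ticket.
 * "my_build_val_list" and "my_submit" are hypothetical driver helpers.
 *
 *	struct ww_acquire_ctx ticket;
 *	LIST_HEAD(val_list);
 *	int ret;
 *
 *	my_build_val_list(&val_list);
 *	ret = ttm_eu_reserve_buffers(&ticket, &val_list);
 *	if (unlikely(ret != 0))
 *		return ret;
 *
 *	ret = my_submit(&val_list);
 *	if (unlikely(ret != 0)) {
 *		ttm_eu_backoff_reservation(&ticket, &val_list);
 *		return ret;
 *	}
 */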

/*
 * Fence all buffers on @list with @sync_obj, return them to the LRU
 * lists and unreserve them, releasing the ww_acquire ticket, if any.
 */
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);
	spin_lock(&bdev->fence_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
		entry->reserved = false;
	}
	spin_unlock(&bdev->fence_lock);
	spin_unlock(&glob->lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);

	/* Drop the old fences outside of the spinlocks. */
	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
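
/*
 * Usage sketch completing the flow above (an illustration, not code
 * from this file): once the commands referencing the buffers have been
 * queued, a single sync object fences and unreserves the whole list.
 * "my_create_sync_obj" is a hypothetical driver hook. The function
 * takes its own reference to @sync_obj per buffer, so the caller drops
 * its reference afterwards.
 *
 *	void *sync_obj = my_create_sync_obj(bdev);
 *
 *	ttm_eu_fence_buffer_objects(&ticket, &val_list, sync_obj);
 *	driver->sync_obj_unref(&sync_obj);
 */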