/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

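/*
 * Unreserve all buffers on @list that are still marked reserved,
 * returning those that were taken off their LRU lists back to the
 * LRU. Called with the global LRU lock held.
 */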
static void ttm_eu_backoff_reservation_locked(struct list_head *list,
					      struct ww_acquire_ctx *ticket)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		entry->reserved = false;
		if (entry->removed) {
			ttm_bo_add_to_lru(bo);
			entry->removed = false;
		}
		ww_mutex_unlock(&bo->resv->lock);
	}
}

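/*
 * Take all reserved buffers on @list off their LRU lists, recording
 * the number of list references to drop later in each entry's
 * put_count. Called with the global LRU lock held.
 */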
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}

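/*
 * Drop the LRU list references recorded in put_count by
 * ttm_eu_del_from_lru_locked(), after the LRU lock has been dropped.
 */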
static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}

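/*
 * Undo a previously successful ttm_eu_reserve_buffers(): unreserve
 * all buffers on @list and release the ww_acquire_ctx ticket.
 */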
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list, ticket);
	ww_acquire_fini(ticket);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off,
 * returning -EBUSY, so the caller can wait for that buffer to become
 * free for GPU access.
 *
 * If a buffer is already reserved by another validator, the validator
 * holding the younger ww_acquire_ctx ticket receives -EDEADLK, backs
 * off all of its reservations and sleeps on the contended buffer
 * before retrying. This prevents deadlocks when multiple validators
 * reserve the same buffers in different orders.
 */
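/*
 * Illustrative call sequence from a driver's execbuf path (a sketch,
 * not code from this file; driver_submit() is a hypothetical driver
 * hook and val_list a driver-built list of ttm_validate_buffer
 * entries):
 *
 *	struct ww_acquire_ctx ticket;
 *	int ret;
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &val_list);
 *	if (ret)
 *		return ret;
 *	ret = driver_submit(&val_list);
 *	if (ret) {
 *		ttm_eu_backoff_reservation(&ticket, &val_list);
 *		return ret;
 *	}
 *	ttm_eu_fence_buffer_objects(&ticket, &val_list, sync_obj);
 *	return 0;
 */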

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	ww_acquire_init(ticket, &reservation_ww_class);
retry:
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		/* already slowpath reserved? */
		if (entry->reserved)
			continue;

		ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket);

		if (ret == -EDEADLK) {
			/* uh oh, we lost out, drop every reservation and try
			 * to only reserve this buffer, then start over if
			 * this succeeds.
			 */
			spin_lock(&glob->lru_lock);
			ttm_eu_backoff_reservation_locked(list, ticket);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
							       ticket);
			if (unlikely(ret != 0)) {
				if (ret == -EINTR)
					ret = -ERESTARTSYS;
				goto err_fini;
			}

			entry->reserved = true;
			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
				ret = -EBUSY;
				goto err;
			}
			goto retry;
		} else if (ret)
			goto err;

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			ret = -EBUSY;
			goto err;
		}
	}

	ww_acquire_done(ticket);
	spin_lock(&glob->lru_lock);
	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);
	return 0;

err:
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list, ticket);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);
err_fini:
	ww_acquire_done(ticket);
	ww_acquire_fini(ticket);
	return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

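/*
 * Attach @sync_obj as the new fence to every buffer on @list, put the
 * buffers back on the LRU and unreserve them, then release the
 * ticket. The previous sync objects are unreferenced only after all
 * locks have been dropped.
 */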
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);
	spin_lock(&bdev->fence_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		ttm_bo_add_to_lru(bo);
		ww_mutex_unlock(&bo->resv->lock);
		entry->reserved = false;
	}
	spin_unlock(&bdev->fence_lock);
	spin_unlock(&glob->lru_lock);
	ww_acquire_fini(ticket);

	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);