// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/dma-fence-array.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/seq_file.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage a container of
 * dma_fence objects associated with a resource. A reservation object
 * can have any number of fences attached to it. Each fence carries a usage
 * parameter determining how the operation represented by the fence is using
 * the resource. The RCU mechanism is used to protect read access to fences
 * from locked write-side updates.
 *
 * See struct dma_resv for more details.
 */
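
/*
 * Typical usage sketch (hypothetical driver code; "obj" is a struct
 * dma_resv * and "fence" a made-up job fence): fences are published under
 * the reservation lock, after first reserving a slot.
 *
 *	dma_resv_lock(obj, NULL);
 *	ret = dma_resv_reserve_fences(obj, 1);
 *	if (!ret)
 *		dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_WRITE);
 *	dma_resv_unlock(obj);
 */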

DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

/* Mask for the lower fence pointer bits */
#define DMA_RESV_LIST_MASK	0x3

struct dma_resv_list {
	struct rcu_head rcu;
	u32 num_fences, max_fences;
	struct dma_fence __rcu *table[];
};

/* Extract the fence and usage flags from an RCU protected entry in the list. */
static void dma_resv_list_entry(struct dma_resv_list *list, unsigned int index,
				struct dma_resv *resv, struct dma_fence **fence,
				enum dma_resv_usage *usage)
{
	long tmp;

	tmp = (long)rcu_dereference_check(list->table[index],
					  resv ? dma_resv_held(resv) : true);
	*fence = (struct dma_fence *)(tmp & ~DMA_RESV_LIST_MASK);
	if (usage)
		*usage = tmp & DMA_RESV_LIST_MASK;
}

/* Set the fence and usage flags at the specific index in the list. */
static void dma_resv_list_set(struct dma_resv_list *list,
			      unsigned int index,
			      struct dma_fence *fence,
			      enum dma_resv_usage usage)
{
	long tmp = ((long)fence) | usage;

	RCU_INIT_POINTER(list->table[index], (struct dma_fence *)tmp);
}
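
/*
 * Note on the encoding used above: the table stores tagged pointers. A
 * struct dma_fence allocation is at least 4-byte aligned, so the two low
 * bits of each pointer are free to carry the enum dma_resv_usage value:
 *
 *	tmp   = (long)fence | usage;
 *	fence = (struct dma_fence *)(tmp & ~DMA_RESV_LIST_MASK);
 *	usage = tmp & DMA_RESV_LIST_MASK;
 */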

/*
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * max_fences.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int max_fences)
{
	struct dma_resv_list *list;

	list = kmalloc(struct_size(list, table, max_fences), GFP_KERNEL);
	if (!list)
		return NULL;

	list->max_fences = (ksize(list) - offsetof(typeof(*list), table)) /
		sizeof(*list->table);

	return list;
}

/* Free a dma_resv_list and make sure to drop all references. */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->num_fences; ++i) {
		struct dma_fence *fence;

		dma_resv_list_entry(list, i, NULL, &fence, NULL);
		dma_fence_put(fence);
	}
	kfree_rcu(list, rcu);
}

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);
	seqcount_ww_mutex_init(&obj->seq, &obj->lock);

	RCU_INIT_POINTER(obj->fences, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	/*
	 * This object should be dead and all references must have
	 * been released to it, so no need to be protected with rcu.
	 */
	dma_resv_list_free(rcu_dereference_protected(obj->fences, true));
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);
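
/*
 * Lifecycle sketch (hypothetical; "struct my_bo" and "bo" are made-up
 * names): a dma_resv is typically embedded in a larger buffer object and
 * initialized and destroyed together with it.
 *
 *	struct my_bo {
 *		struct dma_resv resv;
 *	};
 *
 *	dma_resv_init(&bo->resv);
 *	...
 *	dma_resv_fini(&bo->resv);
 */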

/* Dereference the fences while ensuring RCU rules */
static inline struct dma_resv_list *dma_resv_fences_list(struct dma_resv *obj)
{
	return rcu_dereference_check(obj->fences, dma_resv_held(obj));
}

/**
 * dma_resv_reserve_fences - Reserve space to add fences to a dma_resv object.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_fence().  Must be called with @obj
 * locked through dma_resv_lock().
 *
 * Note that the preallocated slots need to be re-reserved if @obj is unlocked
 * at any time before calling dma_resv_add_fence(). This is validated when
 * CONFIG_DEBUG_MUTEXES is enabled.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_fences_list(obj);
	if (old && old->max_fences) {
		if ((old->num_fences + num_fences) <= old->max_fences)
			return 0;
		max = max(old->num_fences + num_fences, old->max_fences * 2);
	} else {
		max = max(4ul, roundup_pow_of_two(num_fences));
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->num_fences : 0); ++i) {
		enum dma_resv_usage usage;
		struct dma_fence *fence;

		dma_resv_list_entry(old, i, obj, &fence, &usage);
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->table[--k], fence);
		else
			dma_resv_list_set(new, j++, fence, usage);
	}
	new->num_fences = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fences, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->table[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_fences);
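
/*
 * Usage sketch (hypothetical; "obj" and "num_shared" are made-up names):
 * reserve all required slots in one call while holding the lock, and bail
 * out on allocation failure before publishing any fence.
 *
 *	ret = dma_resv_reserve_fences(obj, num_shared + 1);
 *	if (ret)
 *		return ret;
 */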

#ifdef CONFIG_DEBUG_MUTEXES
/**
 * dma_resv_reset_max_fences - reset fences for debugging
 * @obj: the dma_resv object to reset
 *
 * Reset the number of pre-reserved fence slots to test that drivers do
 * correct slot allocation using dma_resv_reserve_fences(). See also
 * &dma_resv_list.max_fences.
 */
void dma_resv_reset_max_fences(struct dma_resv *obj)
{
	struct dma_resv_list *fences = dma_resv_fences_list(obj);

	dma_resv_assert_held(obj);

	/* Test fence slot reservation */
	if (fences)
		fences->max_fences = fences->num_fences;
}
EXPORT_SYMBOL(dma_resv_reset_max_fences);
#endif

/**
 * dma_resv_add_fence - Add a fence to the dma_resv obj
 * @obj: the reservation object
 * @fence: the fence to add
 * @usage: how the fence is used, see enum dma_resv_usage
 *
 * Add a fence to a slot, @obj must be locked with dma_resv_lock(), and
 * dma_resv_reserve_fences() must have been called beforehand.
 *
 * See also &dma_resv.fence for a discussion of the semantics.
 */
void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
			enum dma_resv_usage usage)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	/* Drivers should not add containers here, instead add each fence
	 * individually.
	 */
	WARN_ON(dma_fence_is_container(fence));

	fobj = dma_resv_fences_list(obj);
	count = fobj->num_fences;

	write_seqcount_begin(&obj->seq);

	for (i = 0; i < count; ++i) {
		enum dma_resv_usage old_usage;

		dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
		if ((old->context == fence->context && old_usage >= usage) ||
		    dma_fence_is_signaled(old))
			goto replace;
	}

	BUG_ON(fobj->num_fences >= fobj->max_fences);
	old = NULL;
	count++;

replace:
	dma_resv_list_set(fobj, i, fence, usage);
	/* pointer update must be visible before we extend the num_fences */
	smp_store_mb(fobj->num_fences, count);

	write_seqcount_end(&obj->seq);
	dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_fence);
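
/*
 * Usage sketch (hypothetical; "obj" and "fence" are made-up names): after
 * a successful dma_resv_reserve_fences() and while still holding the lock
 * this cannot fail. The usage parameter describes how the operation uses
 * the resource, e.g. read-only access:
 *
 *	dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_READ);
 */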

/**
 * dma_resv_replace_fences - replace fences in the dma_resv obj
 * @obj: the reservation object
 * @context: the context of the fences to replace
 * @replacement: the new fence to use instead
 * @usage: how the new fence is used, see enum dma_resv_usage
 *
 * Replace all fences from a specified context with a new fence. Only valid
 * if the operation represented by the original fence no longer has access
 * to the resources represented by the dma_resv object when the new fence
 * completes.
 *
 * An example for using this is replacing a preemption fence with a page table
 * update fence which makes the resource inaccessible.
 */
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
			     struct dma_fence *replacement,
			     enum dma_resv_usage usage)
{
	struct dma_resv_list *list;
	unsigned int i;

	dma_resv_assert_held(obj);

	list = dma_resv_fences_list(obj);
	write_seqcount_begin(&obj->seq);
	for (i = 0; list && i < list->num_fences; ++i) {
		struct dma_fence *old;

		dma_resv_list_entry(list, i, obj, &old, NULL);
		if (old->context != context)
			continue;

		dma_resv_list_set(list, i, replacement, usage);
		dma_fence_put(old);
	}
	write_seqcount_end(&obj->seq);
}
EXPORT_SYMBOL(dma_resv_replace_fences);
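
/*
 * Usage sketch (hypothetical; "obj", "preempt_ctx" and "pt_update_fence"
 * are made-up names): once a page table update has made the resource
 * inaccessible, its fence can stand in for the preemption fences of that
 * context.
 *
 *	dma_resv_replace_fences(obj, preempt_ctx, pt_update_fence,
 *				DMA_RESV_USAGE_READ);
 */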

/* Restart the unlocked iteration by initializing the cursor object. */
static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
{
	cursor->seq = read_seqcount_begin(&cursor->obj->seq);
	cursor->index = 0;
	cursor->num_fences = 0;
	cursor->fences = dma_resv_fences_list(cursor->obj);
	if (cursor->fences)
		cursor->num_fences = cursor->fences->num_fences;
	cursor->is_restarted = true;
}

/* Walk to the next not yet signaled fence and grab a reference to it */
static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
{
	if (!cursor->fences)
		return;

	do {
		/* Drop the reference from the previous round */
		dma_fence_put(cursor->fence);

		if (cursor->index >= cursor->num_fences) {
			cursor->fence = NULL;
			break;
		}

		dma_resv_list_entry(cursor->fences, cursor->index++,
				    cursor->obj, &cursor->fence,
				    &cursor->fence_usage);
		cursor->fence = dma_fence_get_rcu(cursor->fence);
		if (!cursor->fence)
			break;

		if (!dma_fence_is_signaled(cursor->fence) &&
		    cursor->usage >= cursor->fence_usage)
			break;
	} while (true);
}

/**
 * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next_unlocked().
 *
 * Beware that the iterator can be restarted.  Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_first() whenever possible.
 *
 * Returns the first fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
{
	rcu_read_lock();
	do {
		dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
	} while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
	rcu_read_unlock();

	return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_first_unlocked);

/**
 * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Beware that the iterator can be restarted.  Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_next() whenever possible.
 *
 * Returns the next fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
{
	bool restart;

	rcu_read_lock();
	cursor->is_restarted = false;
	restart = read_seqcount_retry(&cursor->obj->seq, cursor->seq);
	do {
		if (restart)
			dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
		restart = true;
	} while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
	rcu_read_unlock();

	return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
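
/*
 * Iteration sketch (hypothetical; "obj" and "total_fences" are made-up
 * names): because the object is not locked the walk can restart, so any
 * state accumulated so far must be thrown away when that happens.
 *
 *	struct dma_resv_iter cursor;
 *	struct dma_fence *fence;
 *	unsigned int total_fences = 0;
 *
 *	dma_resv_iter_begin(&cursor, obj, DMA_RESV_USAGE_READ);
 *	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 *		if (dma_resv_iter_is_restarted(&cursor))
 *			total_fences = 0;
 *		++total_fences;
 *	}
 *	dma_resv_iter_end(&cursor);
 */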

/**
 * dma_resv_iter_first - first fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next().
 *
 * Return the first fence in the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
{
	struct dma_fence *fence;

	dma_resv_assert_held(cursor->obj);

	cursor->index = 0;
	cursor->fences = dma_resv_fences_list(cursor->obj);

	fence = dma_resv_iter_next(cursor);
	cursor->is_restarted = true;
	return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_first);

/**
 * dma_resv_iter_next - next fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Return the next fence from the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor)
{
	struct dma_fence *fence;

	dma_resv_assert_held(cursor->obj);

	cursor->is_restarted = false;

	do {
		if (!cursor->fences ||
		    cursor->index >= cursor->fences->num_fences)
			return NULL;

		dma_resv_list_entry(cursor->fences, cursor->index++,
				    cursor->obj, &fence, &cursor->fence_usage);
	} while (cursor->fence_usage > cursor->usage);

	return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_next);
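
/*
 * Iteration sketch (hypothetical; "obj" is a made-up name): with the
 * reservation lock held the walk never restarts, so dma_resv_for_each_fence()
 * can be used directly.
 *
 *	struct dma_resv_iter cursor;
 *	struct dma_fence *fence;
 *
 *	dma_resv_lock(obj, NULL);
 *	dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_WRITE, fence)
 *		dma_fence_enable_sw_signaling(fence);
 *	dma_resv_unlock(obj);
 */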

/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. dst-lock must be held.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_iter cursor;
	struct dma_resv_list *list;
	struct dma_fence *f;

	dma_resv_assert_held(dst);

	list = NULL;

	dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_READ);
	dma_resv_for_each_fence_unlocked(&cursor, f) {
		if (dma_resv_iter_is_restarted(&cursor)) {
			dma_resv_list_free(list);

			list = dma_resv_list_alloc(cursor.num_fences);
			if (!list) {
				dma_resv_iter_end(&cursor);
				return -ENOMEM;
			}
			list->num_fences = 0;
		}

		dma_fence_get(f);
		dma_resv_list_set(list, list->num_fences++, f,
				  dma_resv_iter_usage(&cursor));
	}
	dma_resv_iter_end(&cursor);

	write_seqcount_begin(&dst->seq);
	list = rcu_replace_pointer(dst->fences, list, dma_resv_held(dst));
	write_seqcount_end(&dst->seq);

	dma_resv_list_free(list);
	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);
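
/*
 * Usage sketch (hypothetical; "dst_bo" and "src_bo" are made-up objects
 * embedding a dma_resv), e.g. when one buffer object takes over from
 * another:
 *
 *	dma_resv_lock(&dst_bo->resv, NULL);
 *	ret = dma_resv_copy_fences(&dst_bo->resv, &src_bo->resv);
 *	dma_resv_unlock(&dst_bo->resv);
 */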

/**
 * dma_resv_get_fences - Get an object's fences without the update side
 * lock held
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @num_fences: the number of fences returned
 * @fences: the array of fence ptrs returned (array is krealloc'd to the
 * required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object.
 * Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
			unsigned int *num_fences, struct dma_fence ***fences)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	*num_fences = 0;
	*fences = NULL;

	dma_resv_iter_begin(&cursor, obj, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (dma_resv_iter_is_restarted(&cursor)) {
			unsigned int count;

			while (*num_fences)
				dma_fence_put((*fences)[--(*num_fences)]);

			count = cursor.num_fences + 1;

			/* Re-allocate the array if needed */
			*fences = krealloc_array(*fences, count,
						 sizeof(void *),
						 GFP_KERNEL);
			if (count && !*fences) {
				dma_resv_iter_end(&cursor);
				return -ENOMEM;
			}
		}

		(*fences)[(*num_fences)++] = dma_fence_get(fence);
	}
	dma_resv_iter_end(&cursor);

	return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);
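
/*
 * Usage sketch (hypothetical; "obj" is a made-up name): the returned array
 * holds a reference on each fence and must be released by the caller.
 *
 *	struct dma_fence **fences;
 *	unsigned int i, count;
 *
 *	ret = dma_resv_get_fences(obj, DMA_RESV_USAGE_READ, &count, &fences);
 *	if (ret)
 *		return ret;
 *	for (i = 0; i < count; ++i)
 *		dma_fence_put(fences[i]);
 *	kfree(fences);
 */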

/**
 * dma_resv_get_singleton - Get a single fence for all the fences
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @fence: the resulting fence
 *
 * Get a single fence representing all the fences inside the resv object.
 * Returns either 0 for success or -ENOMEM.
 *
 * Warning: This can't be used like this when adding the fence back to the resv
 * object since that can lead to stack corruption when finalizing the
 * dma_fence_array.
 *
 * Returns 0 on success and negative error values on failure.
 */
int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
			   struct dma_fence **fence)
{
	struct dma_fence_array *array;
	struct dma_fence **fences;
	unsigned count;
	int r;

	r = dma_resv_get_fences(obj, usage, &count, &fences);
	if (r)
		return r;

	if (count == 0) {
		*fence = NULL;
		return 0;
	}

	if (count == 1) {
		*fence = fences[0];
		kfree(fences);
		return 0;
	}

	array = dma_fence_array_create(count, fences,
				       dma_fence_context_alloc(1),
				       1, false);
	if (!array) {
		while (count--)
			dma_fence_put(fences[count]);
		kfree(fences);
		return -ENOMEM;
	}

	*fence = &array->base;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
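
/*
 * Usage sketch (hypothetical; "obj" is a made-up name): collapse the
 * current fences into one that can be waited on or stored elsewhere, but
 * must not be added back to the reservation object.
 *
 *	struct dma_fence *fence;
 *
 *	ret = dma_resv_get_singleton(obj, DMA_RESV_USAGE_WRITE, &fence);
 *	if (!ret && fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */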

/**
 * dma_resv_wait_timeout - Wait on a reservation object's fences
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
			   bool intr, unsigned long timeout)
{
	long ret = timeout ? timeout : 1;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		ret = dma_fence_wait_timeout(fence, intr, ret);
		if (ret <= 0) {
			dma_resv_iter_end(&cursor);
			return ret;
		}
	}
	dma_resv_iter_end(&cursor);

	return ret;
}
688
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
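
/*
 * Usage sketch (hypothetical; "obj" is a made-up name): interruptible wait
 * for all writers with a one second timeout.
 *
 *	long ret;
 *
 *	ret = dma_resv_wait_timeout(obj, DMA_RESV_USAGE_WRITE, true, HZ);
 *	if (ret == 0)
 *		return -ETIMEDOUT;
 *	if (ret < 0)
 *		return ret;
 */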

/**
 * dma_resv_test_signaled - Test if a reservation object's fences have been
 * signaled.
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 *
 * True if all fences are signaled, else false.
 */
bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		dma_resv_iter_end(&cursor);
		return false;
	}
	dma_resv_iter_end(&cursor);
	return true;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
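
/*
 * Usage sketch (hypothetical; "obj" is a made-up name): a non-blocking
 * busy check, e.g. for implementing a busy ioctl.
 *
 *	if (!dma_resv_test_signaled(obj, DMA_RESV_USAGE_READ))
 *		return -EBUSY;
 */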

/**
 * dma_resv_describe - Dump description of the resv object into seq_file
 * @obj: the reservation object
 * @seq: the seq_file to dump the description into
 *
 * Dump a textual description of the fences inside a dma_resv object into the
 * seq_file.
 */
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
{
	static const char *usage[] = { "kernel", "write", "read" };
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) {
		seq_printf(seq, "\t%s fence:",
			   usage[dma_resv_iter_usage(&cursor)]);
		dma_fence_describe(fence, seq);
	}
}
EXPORT_SYMBOL_GPL(dma_resv_describe);
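
/*
 * Usage sketch (hypothetical; a made-up debugfs show callback for the
 * "struct my_bo" example above): dump the fences of a buffer object into
 * a seq_file.
 *
 *	static int my_bo_show(struct seq_file *m, void *data)
 *	{
 *		struct my_bo *bo = m->private;
 *
 *		dma_resv_describe(&bo->resv, m);
 *		return 0;
 *	}
 */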

#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	struct address_space mapping;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);
	address_space_init_once(&mapping);

	mmap_read_lock(mm);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	fs_reclaim_acquire(GFP_KERNEL);
	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
	i_mmap_lock_write(&mapping);
	i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	__dma_fence_might_wait();
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
	__dma_fence_might_wait();
#endif
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	mmap_read_unlock(mm);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif