// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer.  A reservation object
 * can have attached one exclusive fence (normally associated with
 * write operations) or N shared fences (read operations).  The RCU
 * mechanism is used to protect read access to fences from locked
 * write-side updates.
 *
 * See struct dma_resv for more details.
 */
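
/*
 * Example (editor's sketch, not part of the original file): the typical
 * write-side pattern. A driver locks the reservation object, preallocates
 * a shared slot if needed, publishes the fence of a freshly submitted job
 * and unlocks again. All names below are hypothetical.
 */
static int example_publish_fence(struct dma_resv *resv,
				 struct dma_fence *fence, bool is_write)
{
	/* no ww_acquire_ctx: we only lock this one object */
	int ret = dma_resv_lock(resv, NULL);

	if (ret)
		return ret;

	if (is_write) {
		/* the exclusive slot replaces all attached fences */
		dma_resv_add_excl_fence(resv, fence);
	} else {
		/* shared slots must be reserved before adding */
		ret = dma_resv_reserve_shared(resv, 1);
		if (!ret)
			dma_resv_add_shared_fence(resv, fence);
	}

	dma_resv_unlock(resv);
	return ret;
}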

DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

/**
 * dma_resv_list_alloc - allocate fence list
 * @shared_max: number of fences we need space for
 *
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * shared_max.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
	struct dma_resv_list *list;

	list = kmalloc(struct_size(list, shared, shared_max), GFP_KERNEL);
	if (!list)
		return NULL;

	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
		sizeof(*list->shared);

	return list;
}

/**
 * dma_resv_list_free - free fence list
 * @list: list to free
 *
 * Free a dma_resv_list and make sure to drop all references.
 */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->shared_count; ++i)
		dma_fence_put(rcu_dereference_protected(list->shared[i], true));

	kfree_rcu(list, rcu);
}

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);
	seqcount_ww_mutex_init(&obj->seq, &obj->lock);

	RCU_INIT_POINTER(obj->fence, NULL);
	RCU_INIT_POINTER(obj->fence_excl, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	struct dma_resv_list *fobj;
	struct dma_fence *excl;

	/*
	 * This object should be dead and all references must have
	 * been released to it, so no need to be protected with rcu.
	 */
	excl = rcu_dereference_protected(obj->fence_excl, 1);
	if (excl)
		dma_fence_put(excl);

	fobj = rcu_dereference_protected(obj->fence, 1);
	dma_resv_list_free(fobj);
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);
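
/*
 * Example (editor's sketch): a reservation object is normally embedded in
 * a driver's buffer object and follows its lifetime. The structure and
 * functions below are hypothetical.
 */
struct example_buffer {
	struct dma_resv resv;
	/* backing storage, GEM object, ... */
};

static void example_buffer_setup(struct example_buffer *buf)
{
	dma_resv_init(&buf->resv);
}

static void example_buffer_teardown(struct example_buffer *buf)
{
	/* drops the remaining fence references as seen above */
	dma_resv_fini(&buf->resv);
}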

/**
 * dma_resv_reserve_shared - Reserve space to add shared fences to
 * a dma_resv.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_shared_fence(). Must
 * be called with @obj locked through dma_resv_lock().
 *
 * Note that the preallocated slots need to be re-reserved if @obj is unlocked
 * at any time before calling dma_resv_add_shared_fence(). This is validated
 * when CONFIG_DEBUG_MUTEXES is enabled.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_shared_list(obj);
	if (old && old->shared_max) {
		if ((old->shared_count + num_fences) <= old->shared_max)
			return 0;
		max = max(old->shared_count + num_fences, old->shared_max * 2);
	} else {
		max = max(4ul, roundup_pow_of_two(num_fences));
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(old->shared[i],
						  dma_resv_held(obj));
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->shared[--k], fence);
		else
			RCU_INIT_POINTER(new->shared[j++], fence);
	}
	new->shared_count = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) shared fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fence, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->shared[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_shared);
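
/*
 * Example (editor's sketch): reserving slots for a whole batch up front so
 * that the following dma_resv_add_shared_fence() calls cannot fail. If the
 * lock were dropped in between, the reservation would have to be redone.
 * All names are hypothetical.
 */
static int example_add_read_fences(struct dma_resv *resv,
				   struct dma_fence **fences,
				   unsigned int num_fences)
{
	unsigned int i;
	int ret;

	dma_resv_assert_held(resv);

	ret = dma_resv_reserve_shared(resv, num_fences);
	if (ret)
		return ret;	/* -ENOMEM */

	for (i = 0; i < num_fences; ++i)
		dma_resv_add_shared_fence(resv, fences[i]);

	return 0;
}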

#ifdef CONFIG_DEBUG_MUTEXES
/**
 * dma_resv_reset_shared_max - reset shared fences for debugging
 * @obj: the dma_resv object to reset
 *
 * Reset the number of pre-reserved shared slots to test that drivers do
 * correct slot allocation using dma_resv_reserve_shared(). See also
 * &dma_resv_list.shared_max.
 */
void dma_resv_reset_shared_max(struct dma_resv *obj)
{
	struct dma_resv_list *fences = dma_resv_shared_list(obj);

	dma_resv_assert_held(obj);

	/* Test shared fence slot reservation */
	if (fences)
		fences->shared_max = fences->shared_count;
}
EXPORT_SYMBOL(dma_resv_reset_shared_max);
#endif

/**
 * dma_resv_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot, @obj must be locked with dma_resv_lock(), and
 * dma_resv_reserve_shared() has been called.
 *
 * See also &dma_resv.fence for a discussion of the semantics.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	fobj = dma_resv_shared_list(obj);
	count = fobj->shared_count;

	write_seqcount_begin(&obj->seq);

	for (i = 0; i < count; ++i) {
		old = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj));
		if (old->context == fence->context ||
		    dma_fence_is_signaled(old))
			goto replace;
	}

	BUG_ON(fobj->shared_count >= fobj->shared_max);
	old = NULL;
	count++;

replace:
	RCU_INIT_POINTER(fobj->shared[i], fence);
	/* pointer update must be visible before we extend the shared_count */
	smp_store_mb(fobj->shared_count, count);

	write_seqcount_end(&obj->seq);
	dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);
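
/*
 * Example (editor's sketch): attaching one job fence to two buffers by
 * locking both reservation objects under a single ww_acquire_ctx, backing
 * off with dma_resv_lock_slow() when -EDEADLK reports a lock order
 * inversion. All names are hypothetical.
 */
static int example_fence_two_buffers(struct dma_resv *a, struct dma_resv *b,
				     struct dma_fence *fence)
{
	struct dma_resv *objs[] = { a, b };
	struct dma_resv *contended = NULL;
	struct ww_acquire_ctx ctx;
	int i, locked = 0, ret;

	ww_acquire_init(&ctx, &reservation_ww_class);
retry:
	for (i = 0; i < 2; ++i) {
		if (objs[i] == contended) {
			contended = NULL;	/* taken via dma_resv_lock_slow() */
			continue;
		}
		ret = dma_resv_lock(objs[i], &ctx);
		if (ret) {
			locked = i;
			goto err_unlock;
		}
	}
	ww_acquire_done(&ctx);

	ret = 0;
	for (i = 0; i < 2 && !ret; ++i) {
		ret = dma_resv_reserve_shared(objs[i], 1);
		if (!ret)
			dma_resv_add_shared_fence(objs[i], fence);
	}

	for (i = 0; i < 2; ++i)
		dma_resv_unlock(objs[i]);
	ww_acquire_fini(&ctx);
	return ret;

err_unlock:
	while (locked--)
		dma_resv_unlock(objs[locked]);
	if (contended)
		dma_resv_unlock(contended);
	if (ret == -EDEADLK) {
		contended = objs[i];
		dma_resv_lock_slow(contended, &ctx);
		goto retry;
	}
	ww_acquire_fini(&ctx);
	return ret;
}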

/**
 * dma_resv_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock().
 * Note that this function replaces all fences attached to @obj, see also
 * &dma_resv.fence_excl for a discussion of the semantics.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_fence *old_fence = dma_resv_excl_fence(obj);
	struct dma_resv_list *old;
	u32 i = 0;

	dma_resv_assert_held(obj);

	old = dma_resv_shared_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		dma_fence_get(fence);

	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);

	/* inplace update, no shared fences */
	while (i--)
		dma_fence_put(rcu_dereference_protected(old->shared[i],
						dma_resv_held(obj)));

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);
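
/*
 * Example (editor's sketch): publishing a write job's fence. No slot
 * reservation is needed since the exclusive slot replaces all attached
 * fences. Names are hypothetical.
 */
static int example_publish_write_fence(struct dma_resv *resv,
				       struct dma_fence *fence)
{
	int ret = dma_resv_lock(resv, NULL);

	if (ret)
		return ret;

	dma_resv_add_excl_fence(resv, fence);
	dma_resv_unlock(resv);
	return 0;
}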

/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. dst-lock must be held.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_list *src_list, *dst_list;
	struct dma_fence *old, *new;
	unsigned int i;

	dma_resv_assert_held(dst);

	rcu_read_lock();
	src_list = dma_resv_shared_list(src);

retry:
	if (src_list) {
		unsigned int shared_count = src_list->shared_count;

		rcu_read_unlock();

		dst_list = dma_resv_list_alloc(shared_count);
		if (!dst_list)
			return -ENOMEM;

		rcu_read_lock();
		src_list = dma_resv_shared_list(src);
		if (!src_list || src_list->shared_count > shared_count) {
			kfree(dst_list);
			goto retry;
		}

		dst_list->shared_count = 0;
		for (i = 0; i < src_list->shared_count; ++i) {
			struct dma_fence __rcu **dst;
			struct dma_fence *fence;

			fence = rcu_dereference(src_list->shared[i]);
			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &fence->flags))
				continue;

			if (!dma_fence_get_rcu(fence)) {
				dma_resv_list_free(dst_list);
				src_list = dma_resv_shared_list(src);
				goto retry;
			}

			if (dma_fence_is_signaled(fence)) {
				dma_fence_put(fence);
				continue;
			}

			dst = &dst_list->shared[dst_list->shared_count++];
			rcu_assign_pointer(*dst, fence);
		}
	} else {
		dst_list = NULL;
	}

	new = dma_fence_get_rcu_safe(&src->fence_excl);
	rcu_read_unlock();

	src_list = dma_resv_shared_list(dst);
	old = dma_resv_excl_fence(dst);

	write_seqcount_begin(&dst->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(dst->fence_excl, new);
	RCU_INIT_POINTER(dst->fence, dst_list);
	write_seqcount_end(&dst->seq);

	dma_resv_list_free(src_list);
	dma_fence_put(old);

	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);
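
/*
 * Example (editor's sketch): a buffer-move helper can duplicate fence
 * state into a transient "ghost" object so the old storage stays fenced
 * while the original object is reused; TTM uses the function in that
 * spirit. The names below are hypothetical.
 */
static int example_clone_fences(struct dma_resv *ghost, struct dma_resv *src)
{
	int ret = dma_resv_lock(ghost, NULL);

	if (ret)
		return ret;

	ret = dma_resv_copy_fences(ghost, src);	/* may return -ENOMEM */
	dma_resv_unlock(ghost);
	return ret;
}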

/**
 * dma_resv_get_fences - Get an object's shared and exclusive
 * fences without update side lock held
 * @obj: the reservation object
 * @pfence_excl: the returned exclusive fence (or NULL)
 * @pshared_count: the number of shared fences returned
 * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object. If the pointer for the
 * exclusive fence is not specified the fence is put into the array of the
 * shared fences as well. Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
			unsigned int *pshared_count,
			struct dma_fence ***pshared)
{
	struct dma_fence **shared = NULL;
	struct dma_fence *fence_excl;
	unsigned int shared_count;
	int ret = 1;

	do {
		struct dma_resv_list *fobj;
		unsigned int i, seq;
		size_t sz = 0;

		shared_count = i = 0;

		rcu_read_lock();
		seq = read_seqcount_begin(&obj->seq);

		fence_excl = dma_resv_excl_fence(obj);
		if (fence_excl && !dma_fence_get_rcu(fence_excl))
			goto unlock;

		fobj = dma_resv_shared_list(obj);
		if (fobj)
			sz += sizeof(*shared) * fobj->shared_max;

		if (!pfence_excl && fence_excl)
			sz += sizeof(*shared);

		if (sz) {
			struct dma_fence **nshared;

			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();

				dma_fence_put(fence_excl);
				fence_excl = NULL;

				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				break;
			}
			shared = nshared;
			shared_count = fobj ? fobj->shared_count : 0;
			for (i = 0; i < shared_count; ++i) {
				shared[i] = rcu_dereference(fobj->shared[i]);
				if (!dma_fence_get_rcu(shared[i]))
					break;
			}
		}

		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
			while (i--)
				dma_fence_put(shared[i]);
			dma_fence_put(fence_excl);
			goto unlock;
		}

		ret = 0;
unlock:
		rcu_read_unlock();
	} while (ret);

	if (pfence_excl)
		*pfence_excl = fence_excl;
	else if (fence_excl)
		shared[shared_count++] = fence_excl;

	if (!shared_count) {
		kfree(shared);
		shared = NULL;
	}

	*pshared_count = shared_count;
	*pshared = shared;
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);
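
/*
 * Example (editor's sketch): snapshotting all fences without taking the
 * reservation lock, e.g. to export them. The caller owns one reference to
 * each returned fence plus the krealloc'd array. Names are hypothetical.
 */
static int example_collect_fences(struct dma_resv *resv)
{
	struct dma_fence **fences;
	struct dma_fence *excl;
	unsigned int i, count;
	int ret;

	ret = dma_resv_get_fences(resv, &excl, &count, &fences);
	if (ret)
		return ret;	/* -ENOMEM */

	/* ... inspect excl and fences[0..count - 1] here ... */

	for (i = 0; i < count; ++i)
		dma_fence_put(fences[i]);
	kfree(fences);
	dma_fence_put(excl);	/* dma_fence_put(NULL) is a no-op */
	return 0;
}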

/**
 * dma_resv_wait_timeout - Wait on reservation object's
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
			   unsigned long timeout)
{
	long ret = timeout ? timeout : 1;
	unsigned int seq, shared_count;
	struct dma_fence *fence;
	int i;

retry:
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();
	i = -1;

	fence = dma_resv_excl_fence(obj);
	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		if (!dma_fence_get_rcu(fence))
			goto unlock_retry;

		if (dma_fence_is_signaled(fence)) {
			dma_fence_put(fence);
			fence = NULL;
		}
	} else {
		fence = NULL;
	}

	if (wait_all) {
		struct dma_resv_list *fobj = dma_resv_shared_list(obj);

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; !fence && i < shared_count; ++i) {
			struct dma_fence *lfence;

			lfence = rcu_dereference(fobj->shared[i]);
			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &lfence->flags))
				continue;

			if (!dma_fence_get_rcu(lfence))
				goto unlock_retry;

			if (dma_fence_is_signaled(lfence)) {
				dma_fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	rcu_read_unlock();
	if (fence) {
		if (read_seqcount_retry(&obj->seq, seq)) {
			dma_fence_put(fence);
			goto retry;
		}

		ret = dma_fence_wait_timeout(fence, intr, ret);
		dma_fence_put(fence);
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
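
/*
 * Example (editor's sketch): blocking until a buffer is idle, with an
 * interruptible sleep bounded to 100 ms. Names are hypothetical.
 */
static int example_wait_idle(struct dma_resv *resv)
{
	long ret;

	ret = dma_resv_wait_timeout(resv, true /* wait_all */, true /* intr */,
				    msecs_to_jiffies(100));
	if (ret < 0)
		return ret;		/* e.g. -ERESTARTSYS */
	return ret ? 0 : -ETIMEDOUT;	/* 0 means the wait timed out */
}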

static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
{
	struct dma_fence *fence, *lfence = passed_fence;
	int ret = 1;

	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
		fence = dma_fence_get_rcu(lfence);
		if (!fence)
			return -1;

		ret = !!dma_fence_is_signaled(fence);
		dma_fence_put(fence);
	}
	return ret;
}

/**
 * dma_resv_test_signaled - Test if a reservation object's fences have been
 * signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 *
 * True if all fences signaled, else false.
 */
bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
{
	struct dma_fence *fence;
	unsigned int seq;
	int ret;

	rcu_read_lock();
retry:
	ret = true;
	seq = read_seqcount_begin(&obj->seq);

	if (test_all) {
		struct dma_resv_list *fobj = dma_resv_shared_list(obj);
		unsigned int i, shared_count;

		shared_count = fobj ? fobj->shared_count : 0;
		for (i = 0; i < shared_count; ++i) {
			fence = rcu_dereference(fobj->shared[i]);
			ret = dma_resv_test_signaled_single(fence);
			if (ret < 0)
				goto retry;
			else if (!ret)
				break;
		}
	}

	fence = dma_resv_excl_fence(obj);
	if (ret && fence) {
		ret = dma_resv_test_signaled_single(fence);
		if (ret < 0)
			goto retry;
	}

	if (read_seqcount_retry(&obj->seq, seq))
		goto retry;

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
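
/*
 * Example (editor's sketch): a non-blocking busy check, as a GEM_BUSY
 * style ioctl might perform. A prospective writer has to wait for all
 * fences, a reader only for the exclusive one. Names are hypothetical.
 */
static bool example_buffer_is_busy(struct dma_resv *resv, bool for_write)
{
	return !dma_resv_test_signaled(resv, for_write);
}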

#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	struct address_space mapping;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);
	address_space_init_once(&mapping);

	mmap_read_lock(mm);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	fs_reclaim_acquire(GFP_KERNEL);
	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
	i_mmap_lock_write(&mapping);
	i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	__dma_fence_might_wait();
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
	__dma_fence_might_wait();
#endif
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	mmap_read_unlock(mm);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif