// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer.  A reservation object
 * can have attached one exclusive fence (normally associated with
 * write operations) or N shared fences (read operations).  The RCU
 * mechanism is used to protect read access to fences from locked
 * write-side updates.
 */
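
/*
 * Illustrative sketch (assumed usage, not an API defined in this file):
 * readers can query fence state under RCU without taking obj->lock, e.g.
 *
 *	if (!dma_resv_test_signaled_rcu(obj, true))
 *		ret = dma_resv_wait_timeout_rcu(obj, true, true, HZ);
 *
 * while writers must hold the ww_mutex to publish fences, as documented
 * on the functions below.
 */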

DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

/**
 * dma_resv_list_alloc - allocate fence list
 * @shared_max: number of fences we need space for
 *
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * shared_max.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
	struct dma_resv_list *list;

	list = kmalloc(struct_size(list, shared, shared_max), GFP_KERNEL);
	if (!list)
		return NULL;

	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
		sizeof(*list->shared);

	return list;
}

/**
 * dma_resv_list_free - free fence list
 * @list: list to free
 *
 * Free a dma_resv_list and make sure to drop all references.
 */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->shared_count; ++i)
		dma_fence_put(rcu_dereference_protected(list->shared[i], true));

	kfree_rcu(list, rcu);
}

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);
	seqcount_ww_mutex_init(&obj->seq, &obj->lock);

	RCU_INIT_POINTER(obj->fence, NULL);
	RCU_INIT_POINTER(obj->fence_excl, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	struct dma_resv_list *fobj;
	struct dma_fence *excl;

	/*
	 * This object should be dead and all references must have
	 * been released to it, so no need to be protected with rcu.
	 */
	excl = rcu_dereference_protected(obj->fence_excl, 1);
	if (excl)
		dma_fence_put(excl);

	fobj = rcu_dereference_protected(obj->fence, 1);
	dma_resv_list_free(fobj);
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);

/**
 * dma_resv_reserve_shared - Reserve space to add shared fences to
 * a dma_resv.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_shared_fence().  Must
 * be called with obj->lock held.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_get_list(obj);

	if (old && old->shared_max) {
		if ((old->shared_count + num_fences) <= old->shared_max)
			return 0;
		max = max(old->shared_count + num_fences, old->shared_max * 2);
	} else {
		max = max(4ul, roundup_pow_of_two(num_fences));
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(old->shared[i],
						  dma_resv_held(obj));
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->shared[--k], fence);
		else
			RCU_INIT_POINTER(new->shared[j++], fence);
	}
	new->shared_count = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) shared fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fence, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->shared[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_shared);
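
/*
 * Usage sketch (hypothetical driver code): reserving the slot up front
 * means the later dma_resv_add_shared_fence() cannot fail.
 *
 *	dma_resv_lock(obj, NULL);
 *	ret = dma_resv_reserve_shared(obj, 1);
 *	if (!ret)
 *		dma_resv_add_shared_fence(obj, fence);
 *	dma_resv_unlock(obj);
 */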

#ifdef CONFIG_DEBUG_MUTEXES
/**
 * dma_resv_reset_shared_max - reset shared fences for debugging
 * @obj: the dma_resv object to reset
 *
 * Reset the number of pre-reserved shared slots to test that drivers do
 * correct slot allocation using dma_resv_reserve_shared(). See also
 * &dma_resv_list.shared_max.
 */
void dma_resv_reset_shared_max(struct dma_resv *obj)
{
	/* Test shared fence slot reservation */
	if (rcu_access_pointer(obj->fence)) {
		struct dma_resv_list *fence = dma_resv_get_list(obj);

		fence->shared_max = fence->shared_count;
	}
}
EXPORT_SYMBOL(dma_resv_reset_shared_max);
#endif

/**
 * dma_resv_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot. obj->lock must be held, and
 * dma_resv_reserve_shared() must have been called.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	fobj = dma_resv_get_list(obj);
	count = fobj->shared_count;

	write_seqcount_begin(&obj->seq);

	for (i = 0; i < count; ++i) {

		old = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj));
		if (old->context == fence->context ||
		    dma_fence_is_signaled(old))
			goto replace;
	}

	BUG_ON(fobj->shared_count >= fobj->shared_max);
	old = NULL;
	count++;

replace:
	RCU_INIT_POINTER(fobj->shared[i], fence);
	/* pointer update must be visible before we extend the shared_count */
	smp_store_mb(fobj->shared_count, count);

	write_seqcount_end(&obj->seq);
	dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);

/**
 * dma_resv_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot.  The obj->lock must be held.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_fence *old_fence = dma_resv_get_excl(obj);
	struct dma_resv_list *old;
	u32 i = 0;

	dma_resv_assert_held(obj);

	old = dma_resv_get_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		dma_fence_get(fence);

	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);

	/* inplace update, no shared fences */
	while (i--)
		dma_fence_put(rcu_dereference_protected(old->shared[i],
						dma_resv_held(obj)));

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);
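
/*
 * Usage sketch (hypothetical): after submitting a write, a driver would
 * typically install the job's fence as the new exclusive fence.
 *
 *	dma_resv_lock(obj, NULL);
 *	dma_resv_add_excl_fence(obj, job_fence);
 *	dma_resv_unlock(obj);
 *
 * Since this clears the shared slots, callers are generally expected to
 * ensure the new exclusive fence signals only after the work behind the
 * fences it replaces has completed.
 */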

/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. dst->lock must be held.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_list *src_list, *dst_list;
	struct dma_fence *old, *new;
	unsigned int i;

	dma_resv_assert_held(dst);

	rcu_read_lock();
	src_list = rcu_dereference(src->fence);

retry:
	if (src_list) {
		unsigned int shared_count = src_list->shared_count;

		rcu_read_unlock();

		dst_list = dma_resv_list_alloc(shared_count);
		if (!dst_list)
			return -ENOMEM;

		rcu_read_lock();
		src_list = rcu_dereference(src->fence);
		if (!src_list || src_list->shared_count > shared_count) {
			kfree(dst_list);
			goto retry;
		}

		dst_list->shared_count = 0;
		for (i = 0; i < src_list->shared_count; ++i) {
			struct dma_fence __rcu **dst;
			struct dma_fence *fence;

			fence = rcu_dereference(src_list->shared[i]);
			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &fence->flags))
				continue;

			if (!dma_fence_get_rcu(fence)) {
				dma_resv_list_free(dst_list);
				src_list = rcu_dereference(src->fence);
				goto retry;
			}

			if (dma_fence_is_signaled(fence)) {
				dma_fence_put(fence);
				continue;
			}

			dst = &dst_list->shared[dst_list->shared_count++];
			rcu_assign_pointer(*dst, fence);
		}
	} else {
		dst_list = NULL;
	}

	new = dma_fence_get_rcu_safe(&src->fence_excl);
	rcu_read_unlock();

	src_list = dma_resv_get_list(dst);
	old = dma_resv_get_excl(dst);

	write_seqcount_begin(&dst->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(dst->fence_excl, new);
	RCU_INIT_POINTER(dst->fence, dst_list);
	write_seqcount_end(&dst->seq);

	dma_resv_list_free(src_list);
	dma_fence_put(old);

	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);
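
/*
 * Usage sketch (hypothetical): duplicating fence state, e.g. when
 * cloning a buffer object, with only dst locked.
 *
 *	dma_resv_lock(dst, NULL);
 *	ret = dma_resv_copy_fences(dst, src);
 *	dma_resv_unlock(dst);
 */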

/**
 * dma_resv_get_fences_rcu - Get an object's shared and exclusive
 * fences without update side lock held
 * @obj: the reservation object
 * @pfence_excl: the returned exclusive fence (or NULL)
 * @pshared_count: the number of shared fences returned
 * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object. If the pointer for the
 * exclusive fence is not specified the fence is put into the array of the
 * shared fences as well. Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences_rcu(struct dma_resv *obj,
			    struct dma_fence **pfence_excl,
			    unsigned int *pshared_count,
			    struct dma_fence ***pshared)
{
	struct dma_fence **shared = NULL;
	struct dma_fence *fence_excl;
	unsigned int shared_count;
	int ret = 1;

	do {
		struct dma_resv_list *fobj;
		unsigned int i, seq;
		size_t sz = 0;

		shared_count = i = 0;

		rcu_read_lock();
		seq = read_seqcount_begin(&obj->seq);

		fence_excl = rcu_dereference(obj->fence_excl);
		if (fence_excl && !dma_fence_get_rcu(fence_excl))
			goto unlock;

		fobj = rcu_dereference(obj->fence);
		if (fobj)
			sz += sizeof(*shared) * fobj->shared_max;

		if (!pfence_excl && fence_excl)
			sz += sizeof(*shared);

		if (sz) {
			struct dma_fence **nshared;

			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();

				dma_fence_put(fence_excl);
				fence_excl = NULL;

				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				break;
			}
			shared = nshared;
			shared_count = fobj ? fobj->shared_count : 0;
			for (i = 0; i < shared_count; ++i) {
				shared[i] = rcu_dereference(fobj->shared[i]);
				if (!dma_fence_get_rcu(shared[i]))
					break;
			}
		}

		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
			while (i--)
				dma_fence_put(shared[i]);
			dma_fence_put(fence_excl);
			goto unlock;
		}

		ret = 0;
unlock:
		rcu_read_unlock();
	} while (ret);

	if (pfence_excl)
		*pfence_excl = fence_excl;
	else if (fence_excl)
		shared[shared_count++] = fence_excl;

	if (!shared_count) {
		kfree(shared);
		shared = NULL;
	}

	*pshared_count = shared_count;
	*pshared = shared;
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
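
/*
 * Usage sketch (hypothetical): snapshot every fence without taking the
 * lock; the caller owns the returned references and the array.
 *
 *	struct dma_fence *excl, **shared;
 *	unsigned int count, i;
 *	int ret;
 *
 *	ret = dma_resv_get_fences_rcu(obj, &excl, &count, &shared);
 *	if (!ret) {
 *		for (i = 0; i < count; ++i)
 *			dma_fence_put(shared[i]);
 *		kfree(shared);
 *		dma_fence_put(excl);
 *	}
 *
 * dma_fence_put() tolerates NULL, so excl needs no separate check.
 */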

/**
 * dma_resv_wait_timeout_rcu - Wait on reservation's objects
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
			       bool wait_all, bool intr,
			       unsigned long timeout)
{
	long ret = timeout ? timeout : 1;
	unsigned int seq, shared_count;
	struct dma_fence *fence;
	int i;

retry:
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();
	i = -1;

	fence = rcu_dereference(obj->fence_excl);
	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		if (!dma_fence_get_rcu(fence))
			goto unlock_retry;

		if (dma_fence_is_signaled(fence)) {
			dma_fence_put(fence);
			fence = NULL;
		}

	} else {
		fence = NULL;
	}

	if (wait_all) {
		struct dma_resv_list *fobj = rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; !fence && i < shared_count; ++i) {
			struct dma_fence *lfence;

			lfence = rcu_dereference(fobj->shared[i]);
			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &lfence->flags))
				continue;

			if (!dma_fence_get_rcu(lfence))
				goto unlock_retry;

			if (dma_fence_is_signaled(lfence)) {
				dma_fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	rcu_read_unlock();
	if (fence) {
		if (read_seqcount_retry(&obj->seq, seq)) {
			dma_fence_put(fence);
			goto retry;
		}

		ret = dma_fence_wait_timeout(fence, intr, ret);
		dma_fence_put(fence);
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
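
/*
 * Usage sketch (hypothetical): block up to one second for all fences.
 *
 *	long ret = dma_resv_wait_timeout_rcu(obj, true, true, HZ);
 *
 * ret is -ERESTARTSYS if interrupted, 0 on timeout, and the remaining
 * jiffies (> 0) if everything signaled in time.
 */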

/*
 * Returns 1 if the fence is signaled, 0 if it is not, and -1 if the
 * fence could not be referenced (the caller should retry the RCU read).
 */
static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
{
	struct dma_fence *fence, *lfence = passed_fence;
	int ret = 1;

	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
		fence = dma_fence_get_rcu(lfence);
		if (!fence)
			return -1;

		ret = !!dma_fence_is_signaled(fence);
		dma_fence_put(fence);
	}
	return ret;
}

/**
 * dma_resv_test_signaled_rcu - Test if a reservation object's
 * fences have been signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * RETURNS
 * true if all fences signaled, else false
 */
bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
{
	unsigned int seq, shared_count;
	int ret;

	rcu_read_lock();
retry:
	ret = true;
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);

	if (test_all) {
		struct dma_resv_list *fobj = rcu_dereference(obj->fence);
		unsigned int i;

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence;

			fence = rcu_dereference(fobj->shared[i]);
			ret = dma_resv_test_signaled_single(fence);
			if (ret < 0)
				goto retry;
			else if (!ret)
				break;
		}

		if (read_seqcount_retry(&obj->seq, seq))
			goto retry;
	}

	if (!shared_count) {
		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (fence_excl) {
			ret = dma_resv_test_signaled_single(fence_excl);
			if (ret < 0)
				goto retry;

			if (read_seqcount_retry(&obj->seq, seq))
				goto retry;
		}
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
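
/*
 * Usage sketch (hypothetical): a non-blocking busy check, e.g. backing
 * an ioctl that polls whether a buffer is still in use.
 *
 *	busy = !dma_resv_test_signaled_rcu(obj, true);
 */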

#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	struct address_space mapping;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);
	address_space_init_once(&mapping);

	mmap_read_lock(mm);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	fs_reclaim_acquire(GFP_KERNEL);
	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
	i_mmap_lock_write(&mapping);
	i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	__dma_fence_might_wait();
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
	__dma_fence_might_wait();
#endif
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	mmap_read_unlock(mm);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif