/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2012-2014 Intel Corporation
 */

#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

#include <drm/i915_drm.h>

#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_trace.h"
#include "intel_drv.h"

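/*
 * Per-process bookkeeping: a single i915_mm_struct is shared by all userptr
 * objects created against the same mm_struct, and it owns the (optional)
 * i915_mmu_notifier used to revoke mappings when that address space changes.
 */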
struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_i915_private *i915;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root_cached objects;
	struct i915_mm_struct *mm;
};

struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct drm_i915_gem_object *obj;
	struct interval_tree_node it;
};

static void add_object(struct i915_mmu_object *mo)
{
	GEM_BUG_ON(!RB_EMPTY_NODE(&mo->it.rb));
	interval_tree_insert(&mo->it, &mo->mn->objects);
}

static void del_object(struct i915_mmu_object *mo)
{
	if (RB_EMPTY_NODE(&mo->it.rb))
		return;

	interval_tree_remove(&mo->it, &mo->mn->objects);
	RB_CLEAR_NODE(&mo->it.rb);
}

static void
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
{
	struct i915_mmu_object *mo = obj->userptr.mmu_object;

	/*
	 * During mm_invalidate_range we need to cancel any userptr that
	 * overlaps the range being invalidated. Doing so requires the
	 * struct_mutex, and that risks recursion. In order to cause
	 * recursion, the user must alias the userptr address space with
	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
	 * to invalidate that mmapping, mm_invalidate_range is called with
	 * the userptr address *and* the struct_mutex held.  To prevent that
	 * we set a flag under the i915_mmu_notifier spinlock to indicate
	 * whether this object is valid.
	 */
	if (!mo)
		return;

	spin_lock(&mo->mn->lock);
	if (value)
		add_object(mo);
	else
		del_object(mo);
	spin_unlock(&mo->mn->lock);
}

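/*
 * mmu_notifier callback invoked when a range of the user address space is
 * about to be invalidated: unbind and release the pages of every userptr
 * object overlapping the range, or return -EAGAIN if that would require
 * blocking on a non-blockable invalidation.
 */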
static int
userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
				  const struct mmu_notifier_range *range)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct interval_tree_node *it;
	struct mutex *unlock = NULL;
	unsigned long end;
	int ret = 0;

	if (RB_EMPTY_ROOT(&mn->objects.rb_root))
		return 0;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end = range->end - 1;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, range->start, end);
	while (it) {
		struct drm_i915_gem_object *obj;

		if (!mmu_notifier_range_blockable(range)) {
			ret = -EAGAIN;
			break;
		}

		/*
		 * The mmu_object is released late when destroying the
		 * GEM object so it is entirely possible to gain a
		 * reference on an object in the process of being freed
		 * since our serialisation is via the spinlock and not
		 * the struct_mutex - and consequently use it after it
		 * is freed and then double free it. To prevent that
		 * use-after-free we only acquire a reference on the
		 * object if it is not in the process of being destroyed.
		 */
		obj = container_of(it, struct i915_mmu_object, it)->obj;
		if (!kref_get_unless_zero(&obj->base.refcount)) {
			it = interval_tree_iter_next(it, range->start, end);
			continue;
		}
		spin_unlock(&mn->lock);

		if (!unlock) {
			unlock = &mn->mm->i915->drm.struct_mutex;

			switch (mutex_trylock_recursive(unlock)) {
			default:
			case MUTEX_TRYLOCK_FAILED:
				if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) {
					i915_gem_object_put(obj);
					return -EINTR;
				}
				/* fall through */
			case MUTEX_TRYLOCK_SUCCESS:
				break;

			case MUTEX_TRYLOCK_RECURSIVE:
				unlock = ERR_PTR(-EEXIST);
				break;
			}
		}

		ret = i915_gem_object_unbind(obj);
		if (ret == 0)
			ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
		i915_gem_object_put(obj);
		if (ret)
			goto unlock;

		spin_lock(&mn->lock);

		/*
		 * As we do not (yet) protect the mmu from concurrent insertion
		 * over this range, there is no guarantee that this search will
		 * terminate given a pathologic workload.
		 */
		it = interval_tree_iter_first(&mn->objects, range->start, end);
	}
	spin_unlock(&mn->lock);

unlock:
	if (!IS_ERR_OR_NULL(unlock))
		mutex_unlock(unlock);

	return ret;
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT_CACHED;
	mn->mm = mm;

	return mn;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = fetch_and_zero(&obj->userptr.mmu_object);
	if (!mo)
		return;

	spin_lock(&mo->mn->lock);
	del_object(mo);
	spin_unlock(&mo->mn->lock);
	kfree(mo);
}

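/*
 * Return the mmu notifier for this mm, creating and registering it on first
 * use. Registration is serialised against concurrent callers via mmap_sem
 * and the device-wide mm_lock; the loser of any race frees its notifier.
 */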
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn;
	int err = 0;

	mn = mm->mn;
	if (mn)
		return mn;

	mn = i915_mmu_notifier_create(mm);
	if (IS_ERR(mn))
		err = PTR_ERR(mn);

	down_write(&mm->mm->mmap_sem);
	mutex_lock(&mm->i915->mm_lock);
	if (mm->mn == NULL && !err) {
		/* Protected by mmap_sem (write-lock) */
		err = __mmu_notifier_register(&mn->mn, mm->mm);
		if (!err) {
			/* Protected by mm_lock */
			mm->mn = fetch_and_zero(&mn);
		}
	} else if (mm->mn) {
		/*
		 * Someone else raced and successfully installed the mmu
		 * notifier, we can cancel our own errors.
		 */
		err = 0;
	}
	mutex_unlock(&mm->i915->mm_lock);
	up_write(&mm->mm->mmap_sem);

	if (mn && !IS_ERR(mn))
		kfree(mn);

	return err ? ERR_PTR(err) : mm->mn;
}

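/*
 * Allocate the per-object tracking structure that lets mmu-notifier
 * invalidations of [userptr, userptr + size) find this object; insertion
 * into the interval tree happens later, when the object first gets pages
 * (__i915_gem_userptr_set_active).
 */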
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (!mo)
		return -ENOMEM;

	mo->mn = mn;
	mo->obj = obj;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
	RB_CLEAR_NODE(&mo->it.rb);

	obj->userptr.mmu_object = mo;
	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
	if (mn == NULL)
		return;

	mmu_notifier_unregister(&mn->mn, mm);
	kfree(mn);
}

#else

static void
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
{
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
}

#endif

static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Protected by dev_priv->mm_lock */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/* During release of the GEM object we hold the struct_mutex. This
	 * precludes us from calling mmput() at that time as that may be
	 * the last reference and so call exit_mmap(). exit_mmap() will
	 * attempt to reap the vma, and if we were holding a GTT mmap
	 * would then call drm_gem_vm_close() and attempt to reacquire
	 * the struct mutex. So in order to avoid that recursion, we have
	 * to defer releasing the mm reference until after we drop the
	 * struct_mutex, i.e. we need to schedule a worker to do the clean
	 * up.
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->i915 = to_i915(obj->base.dev);

		mm->mm = current->mm;
		mmgrab(current->mm);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else
		kref_get(&mm->kref);

	obj->userptr.mm = mm;
out:
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
	i915_mmu_notifier_free(mm->mn, mm->mm);
	mmdrop(mm->mm);
	kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	/* Protected by dev_priv->mm_lock */
	hash_del(&mm->node);
	mutex_unlock(&mm->i915->mm_lock);

	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
	queue_work(mm->i915->mm.userptr_wq, &mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}

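/*
 * Deferred request to pin the pages of @obj on behalf of @task from a
 * workqueue, avoiding the mmap_sem vs struct_mutex lock inversion.
 */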
struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

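/*
 * Build a scatterlist from the pinned user pages and map it for the GPU;
 * if DMA mapping fails with large segments, retry with single-page
 * segments before giving up.
 */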
static struct sg_table *
__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
			       struct page **pvec, int num_pages)
{
	unsigned int max_segment = i915_sg_segment_size();
	struct sg_table *st;
	unsigned int sg_page_sizes;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

alloc_table:
	ret = __sg_alloc_table_from_pages(st, pvec, num_pages,
					  0, num_pages << PAGE_SHIFT,
					  max_segment,
					  GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ERR_PTR(ret);
	}

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		sg_free_table(st);

		if (max_segment > PAGE_SIZE) {
			max_segment = PAGE_SIZE;
			goto alloc_table;
		}

		kfree(st);
		return ERR_PTR(ret);
	}

	sg_page_sizes = i915_sg_page_sizes(st->sgl);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return st;
}

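/*
 * Worker that pins the user pages with get_user_pages_remote() outside of
 * struct_mutex, then installs the resulting sg_table on the object, or
 * records the error in obj->userptr.work for the next get_pages attempt.
 */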
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	const int npages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;
		unsigned int flags = 0;

		if (!i915_gem_object_is_readonly(obj))
			flags |= FOLL_WRITE;

		ret = -EFAULT;
		if (mmget_not_zero(mm)) {
			down_read(&mm->mmap_sem);
			while (pinned < npages) {
				ret = get_user_pages_remote
					(work->task, mm,
					 obj->userptr.ptr + pinned * PAGE_SIZE,
					 npages - pinned,
					 flags,
					 pvec + pinned, NULL, NULL);
				if (ret < 0)
					break;

				pinned += ret;
			}
			up_read(&mm->mmap_sem);
			mmput(mm);
		}
	}

	mutex_lock(&obj->mm.lock);
	if (obj->userptr.work == &work->work) {
		struct sg_table *pages = ERR_PTR(ret);

		if (pinned == npages) {
			pages = __i915_gem_userptr_alloc_pages(obj, pvec,
							       npages);
			if (!IS_ERR(pages)) {
				pinned = 0;
				pages = NULL;
			}
		}

		obj->userptr.work = ERR_CAST(pages);
		if (IS_ERR(pages))
			__i915_gem_userptr_set_active(obj, false);
	}
	mutex_unlock(&obj->mm.lock);

	release_pages(pvec, pinned);
	kvfree(pvec);

	i915_gem_object_put(obj);
	put_task_struct(work->task);
	kfree(work);
}

static struct sg_table *
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
{
	struct get_pages_work *work;

	/* Spawn a worker so that we can acquire the
	 * user pages without holding our mutex. Access
	 * to the user pages requires mmap_sem, and we have
	 * a strict lock ordering of mmap_sem, struct_mutex -
	 * we already hold struct_mutex here and so cannot
	 * call gup without encountering a lock inversion.
	 *
	 * Userspace will keep on repeating the operation
	 * (thanks to EAGAIN) until either we hit the fast
	 * path or the worker completes. If the worker is
	 * cancelled or superseded, the task is still run
	 * but the results ignored. (This leads to
	 * complications that we may have a stray object
	 * refcount that we need to be wary of when
	 * checking for existing objects during creation.)
	 * If the worker encounters an error, it reports
	 * that error back to this function through
	 * obj->userptr.work = ERR_PTR.
	 */
	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return ERR_PTR(-ENOMEM);

	obj->userptr.work = &work->work;

	work->obj = i915_gem_object_get(obj);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);

	return ERR_PTR(-EAGAIN);
}

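/*
 * ->get_pages() for userptr objects: try __get_user_pages_fast() when called
 * from the creating process; if that cannot pin everything, defer to the
 * worker and return -EAGAIN so the caller retries once the pages are ready.
 */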
static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct mm_struct *mm = obj->userptr.mm->mm;
	struct page **pvec;
	struct sg_table *pages;
	bool active;
	int pinned;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */

	if (obj->userptr.work) {
		/* active flag should still be held for the pending work */
		if (IS_ERR(obj->userptr.work))
			return PTR_ERR(obj->userptr.work);
		else
			return -EAGAIN;
	}

	pvec = NULL;
	pinned = 0;

	if (mm == current->mm) {
		pvec = kvmalloc_array(num_pages, sizeof(struct page *),
				      GFP_KERNEL |
				      __GFP_NORETRY |
				      __GFP_NOWARN);
		if (pvec) /* defer to worker if malloc fails */
			pinned = __get_user_pages_fast(obj->userptr.ptr,
						       num_pages,
						       !i915_gem_object_is_readonly(obj),
						       pvec);
	}

	active = false;
	if (pinned < 0) {
		pages = ERR_PTR(pinned);
		pinned = 0;
	} else if (pinned < num_pages) {
		pages = __i915_gem_userptr_get_pages_schedule(obj);
		active = pages == ERR_PTR(-EAGAIN);
	} else {
		pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
		active = !IS_ERR(pages);
	}
	if (active)
		__i915_gem_userptr_set_active(obj, true);

	if (IS_ERR(pages))
		release_pages(pvec, pinned);
	kvfree(pvec);

	return PTR_ERR_OR_ZERO(pages);
}

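/*
 * ->put_pages() for userptr objects: cancel any in-flight pinning worker,
 * unmap the pages from the GTT, write back dirtied pages and drop the page
 * references.
 */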
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	/* Cancel any inflight work and force them to restart their gup */
	obj->userptr.work = NULL;
	__i915_gem_userptr_set_active(obj, false);
	if (!pages)
		return;

	__i915_gem_object_release_shmem(obj, pages, true);
	i915_gem_gtt_finish_pages(obj, pages);

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		put_page(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE |
		 I915_GEM_OBJECT_ASYNC_CANCEL,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.release = i915_gem_userptr_release,
};

/*
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
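/*
 * Example userspace usage (a minimal sketch, not part of this file; assumes
 * libdrm's drmIoctl() helper and a page-aligned, page-sized allocation):
 *
 *	struct drm_i915_gem_userptr arg = {
 *		.user_ptr = (uintptr_t)ptr,
 *		.user_size = size,
 *		.flags = 0,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
 *		return -errno;
 *	handle = arg.handle;
 */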
int
i915_gem_userptr_ioctl(struct drm_device *dev,
		       void *data,
		       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
		/* We cannot support coherent userptr objects on hw without
		 * LLC and broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (!args->user_size)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		struct i915_hw_ppgtt *ppgtt;

		/*
		 * On almost all of the older hw, we cannot tell the GPU that
		 * a page is readonly.
		 */
		ppgtt = dev_priv->kernel_context->ppgtt;
		if (!ppgtt || !ppgtt->vm.has_read_only)
			return -ENODEV;
	}

	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	obj->userptr.ptr = args->user_ptr;
	if (args->flags & I915_USERPTR_READ_ONLY)
		i915_gem_object_set_readonly(obj);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}

int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
	mutex_init(&dev_priv->mm_lock);
	hash_init(dev_priv->mm_structs);

	dev_priv->mm.userptr_wq =
		alloc_workqueue("i915-userptr-acquire",
				WQ_HIGHPRI | WQ_UNBOUND,
				0);
	if (!dev_priv->mm.userptr_wq)
		return -ENOMEM;

	return 0;
}

void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->mm.userptr_wq);
}