/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>

#include "display/intel_frontbuffer.h"
#include "pxp/intel_pxp.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_mman.h"
#include "i915_gem_object.h"
#include "i915_gem_ttm.h"
#include "i915_memcpy.h"
#include "i915_trace.h"

static struct kmem_cache *slab_objects;

static const struct drm_gem_object_funcs i915_gem_object_funcs;

struct drm_i915_gem_object *i915_gem_object_alloc(void)
{
	struct drm_i915_gem_object *obj;

	obj = kmem_cache_zalloc(slab_objects, GFP_KERNEL);
	if (!obj)
		return NULL;
	obj->base.funcs = &i915_gem_object_funcs;

	return obj;
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	return kmem_cache_free(slab_objects, obj);
}

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key, unsigned flags)
{
	/*
	 * A gem object is embedded both in a struct ttm_buffer_object :/ and
	 * in a drm_i915_gem_object. Make sure they are aliased.
	 */
	BUILD_BUG_ON(offsetof(typeof(*obj), base) !=
		     offsetof(typeof(*obj), __do_not_access.base));

	spin_lock_init(&obj->vma.lock);
	INIT_LIST_HEAD(&obj->vma.list);

	INIT_LIST_HEAD(&obj->mm.link);

	INIT_LIST_HEAD(&obj->lut_list);
	spin_lock_init(&obj->lut_lock);

	spin_lock_init(&obj->mmo.lock);
	obj->mmo.offsets = RB_ROOT;

	init_rcu_head(&obj->rcu);

	obj->ops = ops;
	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
	obj->flags = flags;

	obj->mm.madv = I915_MADV_WILLNEED;
	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_page.lock);
	INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_dma_page.lock);
}

/**
 * __i915_gem_object_fini - Clean up a GEM object initialization
 * @obj: The gem object to clean up
 *
 * This function cleans up gem object fields that are set up by
 * drm_gem_private_object_init() and i915_gem_object_init().
 * It's primarily intended as a helper for backends that need to
 * clean up the gem object in separate steps.
 */
void __i915_gem_object_fini(struct drm_i915_gem_object *obj)
{
	mutex_destroy(&obj->mm.get_page.lock);
	mutex_destroy(&obj->mm.get_dma_page.lock);
	dma_resv_fini(&obj->base._resv);
}

/**
 * i915_gem_object_set_cache_coherency - Mark up the object's coherency levels
 * for a given cache_level
 * @obj: #drm_i915_gem_object
 * @cache_level: cache level
 */
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	obj->cache_level = cache_level;

	if (cache_level != I915_CACHE_NONE)
		obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
				       I915_BO_CACHE_COHERENT_FOR_WRITE);
	else if (HAS_LLC(i915))
		obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
	else
		obj->cache_coherent = 0;

	obj->cache_dirty =
		!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) &&
		!IS_DGFX(i915);
}

bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/*
	 * This is purely from a security perspective, so we simply don't care
	 * about non-userspace objects being able to bypass the LLC.
	 */
	if (!(obj->flags & I915_BO_ALLOC_USER))
		return false;

	/*
	 * EHL and JSL add the 'Bypass LLC' MOCS entry, which should make it
	 * possible for userspace to bypass the GTT caching bits set by the
	 * kernel, as per the given object cache_level. This is troublesome
	 * since the heavy flush we apply when first gathering the pages is
	 * skipped if the kernel thinks the object is coherent with the GPU. As
	 * a result it might be possible to bypass the cache and read the
	 * contents of the page directly, which could be stale data. If it's
	 * just a case of userspace shooting themselves in the foot then so be
	 * it, but since i915 takes the stance of always zeroing memory before
	 * handing it to userspace, we need to prevent this.
	 */
	return IS_JSL_EHL(i915);
}

static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem);
	struct drm_i915_file_private *fpriv = file->driver_priv;
	struct i915_lut_handle bookmark = {};
	struct i915_mmap_offset *mmo, *mn;
	struct i915_lut_handle *lut, *ln;
	LIST_HEAD(close);

	spin_lock(&obj->lut_lock);
	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;

		if (ctx && ctx->file_priv == fpriv) {
			i915_gem_context_get(ctx);
			list_move(&lut->obj_link, &close);
		}

		/* Break long locks, and carefully continue on from this spot */
		if (&ln->obj_link != &obj->lut_list) {
			list_add_tail(&bookmark.obj_link, &ln->obj_link);
			if (cond_resched_lock(&obj->lut_lock))
				list_safe_reset_next(&bookmark, ln, obj_link);
			__list_del_entry(&bookmark.obj_link);
		}
	}
	spin_unlock(&obj->lut_lock);

	spin_lock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
		drm_vma_node_revoke(&mmo->vma_node, file);
	spin_unlock(&obj->mmo.lock);

	list_for_each_entry_safe(lut, ln, &close, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;
		struct i915_vma *vma;

		/*
		 * We allow the process to have multiple handles to the same
		 * vma, in the same fd namespace, by virtue of flink/open.
		 */

		mutex_lock(&ctx->lut_mutex);
		vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
		if (vma) {
			GEM_BUG_ON(vma->obj != obj);
			GEM_BUG_ON(!atomic_read(&vma->open_count));
			i915_vma_close(vma);
		}
		mutex_unlock(&ctx->lut_mutex);

		i915_gem_context_put(lut->ctx);
		i915_lut_handle_free(lut);
		i915_gem_object_put(obj);
	}
}

void __i915_gem_free_object_rcu(struct rcu_head *head)
{
	struct drm_i915_gem_object *obj =
		container_of(head, typeof(*obj), rcu);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	i915_gem_object_free(obj);

	GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
	atomic_dec(&i915->mm.free_count);
}

static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
{
	/* Skip serialisation and waking the device if known to be not used. */

	if (obj->userfault_count)
		i915_gem_object_release_mmap_gtt(obj);

	if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
		struct i915_mmap_offset *mmo, *mn;

		i915_gem_object_release_mmap_offset(obj);

		rbtree_postorder_for_each_entry_safe(mmo, mn,
						     &obj->mmo.offsets,
						     offset) {
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
		}
		obj->mmo.offsets = RB_ROOT;
	}
}

/**
 * __i915_gem_object_pages_fini - Clean up the page usage of a gem object
 * @obj: The gem object to clean up
 *
 * This function cleans up usage of the object mm.pages member. It
 * is intended for backends that need to clean up a gem object in
 * separate steps and needs to be called when the object is idle before
 * the object's backing memory is freed.
 */
void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
{
	assert_object_held(obj);

	if (!list_empty(&obj->vma.list)) {
		struct i915_vma *vma;

		/*
		 * Note that the vma keeps an object reference while
		 * it is active, so it *should* not sleep while we
		 * destroy it. Our debug code, however, insists it *might*.
		 * For the moment, play along.
		 */
		spin_lock(&obj->vma.lock);
		while ((vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
			GEM_BUG_ON(vma->obj != obj);
			spin_unlock(&obj->vma.lock);

			__i915_vma_put(vma);

			spin_lock(&obj->vma.lock);
		}
		spin_unlock(&obj->vma.lock);
	}

	__i915_gem_object_free_mmaps(obj);

	atomic_set(&obj->mm.pages_pin_count, 0);
	__i915_gem_object_put_pages(obj);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));
}

void __i915_gem_free_object(struct drm_i915_gem_object *obj)
{
	trace_i915_gem_object_destroy(obj);

	GEM_BUG_ON(!list_empty(&obj->lut_list));

	bitmap_free(obj->bit_17);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	drm_gem_free_mmap_offset(&obj->base);

	if (obj->ops->release)
		obj->ops->release(obj);

	if (obj->mm.n_placements > 1)
		kfree(obj->mm.placements);

	if (obj->shares_resv_from)
		i915_vm_resv_put(obj->shares_resv_from);

	__i915_gem_object_fini(obj);
}

static void __i915_gem_free_objects(struct drm_i915_private *i915,
				    struct llist_node *freed)
{
	struct drm_i915_gem_object *obj, *on;

	llist_for_each_entry_safe(obj, on, freed, freed) {
		might_sleep();
		if (obj->ops->delayed_free) {
			obj->ops->delayed_free(obj);
			continue;
		}

		if (!i915_gem_object_trylock(obj, NULL)) {
			/* busy, toss it back to the pile */
			if (llist_add(&obj->freed, &i915->mm.free_list))
				queue_delayed_work(i915->wq, &i915->mm.free_work, msecs_to_jiffies(10));
			continue;
		}

		__i915_gem_object_pages_fini(obj);
		i915_gem_object_unlock(obj);
		__i915_gem_free_object(obj);

		/* But keep the pointer alive for RCU-protected lookups */
		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
		cond_resched();
	}
}

void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
	struct llist_node *freed = llist_del_all(&i915->mm.free_list);

	if (unlikely(freed))
		__i915_gem_free_objects(i915, freed);
}

static void __i915_gem_free_work(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, struct drm_i915_private, mm.free_work.work);

	i915_gem_flush_free_objects(i915);
}

static void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));

	/*
	 * Before we free the object, make sure any pure RCU-only
	 * read-side critical sections are complete, e.g.
	 * i915_gem_busy_ioctl(). For the corresponding synchronized
	 * lookup see i915_gem_object_lookup_rcu().
	 */
	atomic_inc(&i915->mm.free_count);

	/*
	 * Since we require blocking on struct_mutex to unbind the freed
	 * object from the GPU before releasing resources back to the
	 * system, we cannot do that directly from the RCU callback (which may
	 * be a softirq context), but must instead defer that work onto a
	 * kthread. We use the RCU callback rather than move the freed object
	 * directly onto the work queue so that we can mix between using the
	 * worker and performing frees directly from subsequent allocations for
	 * crude but effective memory throttling.
	 */

	if (llist_add(&obj->freed, &i915->mm.free_list))
		queue_delayed_work(i915->wq, &i915->mm.free_work, 0);
}

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin)
{
	struct intel_frontbuffer *front;

	front = __intel_frontbuffer_get(obj);
	if (front) {
		intel_frontbuffer_flush(front, origin);
		intel_frontbuffer_put(front);
	}
}

void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin)
{
	struct intel_frontbuffer *front;

	front = __intel_frontbuffer_get(obj);
	if (front) {
		intel_frontbuffer_invalidate(front, origin);
		intel_frontbuffer_put(front);
	}
}

static void
i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	void *src_map;
	void *src_ptr;

	src_map = kmap_atomic(i915_gem_object_get_page(obj, offset >> PAGE_SHIFT));

	src_ptr = src_map + offset_in_page(offset);
	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_virt_range(src_ptr, size);
	memcpy(dst, src_ptr, size);

	kunmap_atomic(src_map);
}

static void
i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	void __iomem *src_map;
	void __iomem *src_ptr;
	dma_addr_t dma = i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT);

	src_map = io_mapping_map_wc(&obj->mm.region->iomap,
				    dma - obj->mm.region->region.start,
				    PAGE_SIZE);

	src_ptr = src_map + offset_in_page(offset);
	if (!i915_memcpy_from_wc(dst, (void __force *)src_ptr, size))
		memcpy_fromio(dst, src_ptr, size);

	io_mapping_unmap(src_map);
}

/**
 * i915_gem_object_read_from_page - read data from the page of a GEM object
 * @obj: GEM object to read from
 * @offset: offset within the object
 * @dst: buffer to store the read data
 * @size: size to read
 *
 * Reads data from @obj at the specified offset. The requested region to read
 * from can't cross a page boundary. The caller must ensure that @obj pages
 * are pinned and that @obj is synced wrt. any related writes.
 *
 * Return: %0 on success or -ENODEV if the type of @obj's backing store is
 * unsupported.
 */
int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	GEM_BUG_ON(offset >= obj->base.size);
	GEM_BUG_ON(offset_in_page(offset) > PAGE_SIZE - size);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	if (i915_gem_object_has_struct_page(obj))
		i915_gem_object_read_from_page_kmap(obj, offset, dst, size);
	else if (i915_gem_object_has_iomem(obj))
		i915_gem_object_read_from_page_iomap(obj, offset, dst, size);
	else
		return -ENODEV;

	return 0;
}
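/*
 * Illustrative sketch only (not part of the driver): a caller is expected to
 * pin the object's pages around the read, roughly along the lines of
 *
 *	err = i915_gem_object_pin_pages_unlocked(obj);
 *	if (err)
 *		return err;
 *	err = i915_gem_object_read_from_page(obj, 0, &value, sizeof(value));
 *	i915_gem_object_unpin_pages(obj);
 *
 * where the read must not cross a page boundary and the caller has already
 * synced the object against any pending writes.
 */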

/**
 * i915_gem_object_evictable - Whether object is likely evictable after unbind.
 * @obj: The object to check
 *
 * This function checks whether the object is likely evictable after unbind.
 * If the object is not locked when checking, the result is only advisory.
 * If the object is locked when checking, and the function returns true,
 * then an eviction should indeed be possible. But since unlocked vma
 * unpinning and unbinding is currently possible, the object can actually
 * become evictable even if this function returns false.
 *
 * Return: true if the object may be evictable. False otherwise.
 */
bool i915_gem_object_evictable(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int pin_count = atomic_read(&obj->mm.pages_pin_count);

	if (!pin_count)
		return true;

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (i915_vma_is_pinned(vma)) {
			spin_unlock(&obj->vma.lock);
			return false;
		}
		if (atomic_read(&vma->pages_count))
			pin_count--;
	}
	spin_unlock(&obj->vma.lock);
	GEM_WARN_ON(pin_count < 0);

	return pin_count == 0;
}

/**
 * i915_gem_object_migratable - Whether the object is migratable out of the
 * current region.
 * @obj: Pointer to the object.
 *
 * Return: Whether the object is allowed to be resident in other
 * regions than the current while pages are present.
 */
bool i915_gem_object_migratable(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);

	if (!mr)
		return false;

	return obj->mm.n_placements > 1;
}

/**
 * i915_gem_object_has_struct_page - Whether the object is page-backed
 * @obj: The object to query.
 *
 * This function should only be called while the object is locked or pinned,
 * otherwise the page backing may change under the caller.
 *
 * Return: True if page-backed, false otherwise.
 */
bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
#ifdef CONFIG_LOCKDEP
	if (IS_DGFX(to_i915(obj->base.dev)) &&
	    i915_gem_object_evictable((void __force *)obj))
		assert_object_held_shared(obj);
#endif
	return obj->mem_flags & I915_BO_FLAG_STRUCT_PAGE;
}

/**
 * i915_gem_object_has_iomem - Whether the object is iomem-backed
 * @obj: The object to query.
 *
 * This function should only be called while the object is locked or pinned,
 * otherwise the iomem backing may change under the caller.
 *
 * Return: True if iomem-backed, false otherwise.
 */
bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
{
#ifdef CONFIG_LOCKDEP
	if (IS_DGFX(to_i915(obj->base.dev)) &&
	    i915_gem_object_evictable((void __force *)obj))
		assert_object_held_shared(obj);
#endif
	return obj->mem_flags & I915_BO_FLAG_IOMEM;
}

/**
 * i915_gem_object_can_migrate - Whether an object likely can be migrated
 *
 * @obj: The object to migrate
 * @id: The region intended to migrate to
 *
 * Check whether the object backend supports migration to the
 * given region. Note that pinning may affect the ability to migrate as
 * returned by this function.
 *
 * This function is primarily intended as a helper for checking the
 * possibility to migrate objects and might be slightly less permissive
 * than i915_gem_object_migrate() when it comes to objects with the
 * I915_BO_ALLOC_USER flag set.
 *
 * Return: true if migration is possible, false otherwise.
 */
bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
				 enum intel_region_id id)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned int num_allowed = obj->mm.n_placements;
	struct intel_memory_region *mr;
	unsigned int i;

	GEM_BUG_ON(id >= INTEL_REGION_UNKNOWN);
	GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);

	mr = i915->mm.regions[id];
	if (!mr)
		return false;

	if (obj->mm.region == mr)
		return true;

	if (!i915_gem_object_evictable(obj))
		return false;

	if (!obj->ops->migrate)
		return false;

	if (!(obj->flags & I915_BO_ALLOC_USER))
		return true;

	if (num_allowed == 0)
		return false;

	for (i = 0; i < num_allowed; ++i) {
		if (mr == obj->mm.placements[i])
			return true;
	}

	return false;
}

/**
 * i915_gem_object_migrate - Migrate an object to the desired region id
 * @obj: The object to migrate.
 * @ww: An optional struct i915_gem_ww_ctx. If NULL, the backend may
 * not be successful in evicting other objects to make room for this object.
 * @id: The region id to migrate to.
 *
 * Attempt to migrate the object to the desired memory region. The
 * object backend must support migration and the object may not be
 * pinned (explicitly pinned pages or pinned vmas). The object must
 * be locked.
 * On successful completion, the object will have pages pointing to
 * memory in the new region, but an async migration task may not have
 * completed yet, and to accomplish that, i915_gem_object_wait_migration()
 * must be called.
 *
 * Note: the @ww parameter is not used yet, but included to make sure
 * callers put some effort into obtaining a valid ww ctx if one is
 * available.
 *
 * Return: 0 on success. Negative error code on failure. In particular may
 * return -ENXIO on lack of region space, -EDEADLK for deadlock avoidance
 * if @ww is set, -EINTR or -ERESTARTSYS if signal pending, and
 * -EBUSY if the object is pinned.
 */
int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    enum intel_region_id id)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_memory_region *mr;

	GEM_BUG_ON(id >= INTEL_REGION_UNKNOWN);
	GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);
	assert_object_held(obj);

	mr = i915->mm.regions[id];
	GEM_BUG_ON(!mr);

	if (!i915_gem_object_can_migrate(obj, id))
		return -EINVAL;

	if (!obj->ops->migrate) {
		if (GEM_WARN_ON(obj->mm.region != mr))
			return -EINVAL;
		return 0;
	}

	return obj->ops->migrate(obj, mr);
}
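/*
 * Illustrative sketch only (not part of the driver): migration is expected to
 * be requested under the object lock, typically from within a ww transaction,
 * and followed by a wait for the async copy, e.g.
 *
 *	for_i915_gem_ww(&ww, err, true) {
 *		err = i915_gem_object_lock(obj, &ww);
 *		if (err)
 *			continue;
 *		err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
 *		if (err)
 *			continue;
 *		err = i915_gem_object_wait_migration(obj, 0);
 *	}
 *
 * The async copy is only guaranteed to have finished once
 * i915_gem_object_wait_migration() has returned successfully.
 */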

/**
 * i915_gem_object_placement_possible - Check whether the object can be
 * placed in a given memory type
 * @obj: Pointer to the object
 * @type: The memory type to check
 *
 * Return: True if the object can be placed in @type. False otherwise.
 */
bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
					enum intel_memory_type type)
{
	unsigned int i;

	if (!obj->mm.n_placements) {
		switch (type) {
		case INTEL_MEMORY_LOCAL:
			return i915_gem_object_has_iomem(obj);
		case INTEL_MEMORY_SYSTEM:
			return i915_gem_object_has_pages(obj);
		default:
			/* Ignore stolen for now */
			GEM_BUG_ON(1);
			return false;
		}
	}

	for (i = 0; i < obj->mm.n_placements; i++) {
		if (obj->mm.placements[i]->type == type)
			return true;
	}

	return false;
}

void i915_gem_init__objects(struct drm_i915_private *i915)
{
	INIT_DELAYED_WORK(&i915->mm.free_work, __i915_gem_free_work);
}

void i915_objects_module_exit(void)
{
	kmem_cache_destroy(slab_objects);
}

int __init i915_objects_module_init(void)
{
	slab_objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
	if (!slab_objects)
		return -ENOMEM;

	return 0;
}

static const struct drm_gem_object_funcs i915_gem_object_funcs = {
	.free = i915_gem_free_object,
	.close = i915_gem_close_object,
	.export = i915_gem_prime_export,
};

/**
 * i915_gem_object_get_moving_fence - Get the object's moving fence if any
 * @obj: The object whose moving fence to get.
 *
 * A non-signaled moving fence means that there is an async operation
 * pending on the object that needs to be waited on before setting up
 * any GPU- or CPU PTEs to the object's pages.
 *
 * Return: A refcounted pointer to the object's moving fence if any,
 * NULL otherwise.
 */
struct dma_fence *
i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj)
{
	return dma_fence_get(i915_gem_to_ttm(obj)->moving);
}

/**
 * i915_gem_object_wait_moving_fence - Wait for the object's moving fence if any
 * @obj: The object whose moving fence to wait for.
 * @intr: Whether to wait interruptible.
 *
 * If the moving fence signaled without an error, it is detached from the
 * object and put.
 *
 * Return: 0 if successful, -ERESTARTSYS if the wait was interrupted,
 * negative error code if the async operation represented by the
 * moving fence failed.
 */
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
				      bool intr)
{
	struct dma_fence *fence = i915_gem_to_ttm(obj)->moving;
	int ret;

	assert_object_held(obj);
	if (!fence)
		return 0;

	ret = dma_fence_wait(fence, intr);
	if (ret)
		return ret;

	if (fence->error)
		return fence->error;

	i915_gem_to_ttm(obj)->moving = NULL;
	dma_fence_put(fence);
	return 0;
}
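/*
 * Illustrative sketch only (not part of the driver): before setting up CPU or
 * GPU PTEs to the object's pages, a holder of the object lock would typically
 * do
 *
 *	assert_object_held(obj);
 *	err = i915_gem_object_wait_moving_fence(obj, true);
 *	if (err)
 *		return err;
 *
 * after which the pages are known to be in place and the moving fence, if it
 * signaled without error, has been detached from the object.
 */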

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"
#include "selftests/i915_gem_migrate.c"
#include "selftests/i915_gem_object.c"
#include "selftests/i915_gem_coherency.c"
#endif