/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

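/*
 * Install a freshly acquired sg_table on the object: flush the pages into
 * the GPU's view if needed, prime the page/dma lookup iterators, record
 * which GTT page sizes the backing store can satisfy, and (when the object
 * is shrinkable) add it to the shrinker lists. Called with obj->mm.lock held.
 */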
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	bool shrinkable;
	int i;

	lockdep_assert_held(&obj->mm.lock);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;
	obj->mm.get_dma_page.sg_pos = pages->sgl;
	obj->mm.get_dma_page.sg_idx = 0;

	obj->mm.pages = pages;

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	shrinkable = i915_gem_object_is_shrinkable(obj);

	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
		i915_gem_object_set_tiling_quirk(obj);
		shrinkable = false;
	}

	if (shrinkable) {
		struct list_head *list;
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}

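/*
 * Lowest level of page acquisition: refuse purgeable objects and call into
 * the backend's ->get_pages(). Callers are expected to already hold
 * obj->mm.lock.
 */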
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(&i915->drm,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
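/*
 * Illustrative caller pattern (a sketch only; real callers add their own
 * error handling and locking context):
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *	... safely dereference obj->mm.pages ...
 *	i915_gem_object_unpin_pages(obj);
 */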
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (err)
		return err;

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			goto unlock;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

/* Immediately discard the backing storage */
void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
	if (obj->ops->truncate)
		obj->ops->truncate(obj);
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->mm.lock);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	if (obj->ops->writeback)
		obj->ops->writeback(obj);
}

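/*
 * Forget every cached sg lookup so the radix trees can be rebuilt for
 * whatever backing store is installed next.
 */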
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
	rcu_read_unlock();
}

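/*
 * Only vmap()ed mappings need explicit teardown; the single-page WB case
 * hands out a direct page_address() which has nothing to unmap.
 */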
static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vunmap(ptr);
}

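/*
 * Detach the sg_table from the object (the inverse of
 * __i915_gem_object_set_pages): drop any kernel mapping, the cached sg
 * lookups and the page-size bookkeeping, then return the pages so the
 * caller can hand them to the backend's ->put_pages().
 */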
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	return pages;
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	/* May be called by shrinker from within get_pages() (on another bo) */
	mutex_lock(&obj->mm.lock);
	if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
		err = -EBUSY;
		goto unlock;
	}

	i915_gem_object_release_mmap_offset(obj);

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!pages && !i915_gem_object_needs_async_cancel(obj))
		pages = ERR_PTR(-EINVAL);

	if (!IS_ERR(pages))
		obj->ops->put_pages(obj, pages);

	err = 0;
unlock:
	mutex_unlock(&obj->mm.lock);

	return err;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
				      enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
	struct page *stack[32], **pages = stack, *page;
	struct sgt_iter iter;
	pgprot_t pgprot;
	void *vaddr;

	switch (type) {
	default:
		MISSING_CASE(type);
		fallthrough;	/* to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		/*
		 * On 32b, highmem uses a finite set of indirect PTE (i.e.
		 * vmap) to provide virtual mappings of the high pages.
		 * As these are finite, map_new_virtual() must wait for some
		 * other kmap() to finish when it runs out. If we map a large
		 * number of objects, there is no method for it to tell us
		 * to release the mappings, and we deadlock.
		 *
		 * However, if we make an explicit vmap of the page, that
		 * uses a larger vmalloc arena, and also has the ability
		 * to tell us to release unwanted mappings. Most importantly,
		 * it will fail and propagate an error instead of waiting
		 * forever.
		 *
		 * So if the page is beyond the 32b boundary, make an explicit
		 * vmap.
		 */
		if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
			return page_address(sg_page(obj->mm.pages->sgl));
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}

	if (n_pages > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_page(page, iter, obj->mm.pages)
		pages[i++] = page;
	vaddr = vmap(pages, n_pages, 0, pgprot);
	if (pages != stack)
		kvfree(pages);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

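/*
 * Map objects that have no struct pages (e.g. device local memory) by
 * translating each DMA address into a PFN within the region's iomap and
 * stitching them together with vmap_pfn(). Only write-combined mappings
 * are supported here.
 */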
static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
				     enum i915_map_type type)
{
	resource_size_t iomap = obj->mm.region->iomap.base -
		obj->mm.region->region.start;
	unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
	unsigned long stack[32], *pfns = stack, i;
	struct sgt_iter iter;
	dma_addr_t addr;
	void *vaddr;

	if (type != I915_MAP_WC)
		return ERR_PTR(-ENODEV);

	if (n_pfn > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
		if (!pfns)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_daddr(addr, iter, obj->mm.pages)
		pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
	vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
	if (pfns != stack)
		kvfree(pfns);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	unsigned int flags;
	bool pinned;
	void *ptr;
	int err;

	flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
	if (!i915_gem_object_type_has(obj, flags))
		return ERR_PTR(-ENXIO);

	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (err)
		return ERR_PTR(err);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err) {
				ptr = ERR_PTR(err);
				goto out_unlock;
			}

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			ptr = ERR_PTR(-EBUSY);
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		if (GEM_WARN_ON(type == I915_MAP_WC &&
				!static_cpu_has(X86_FEATURE_PAT)))
			ptr = ERR_PTR(-ENODEV);
		else if (i915_gem_object_has_struct_page(obj))
			ptr = i915_gem_object_map_page(obj, type);
		else
			ptr = i915_gem_object_map_pfn(obj, type);
		if (IS_ERR(ptr))
			goto err_unpin;

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

out_unlock:
	mutex_unlock(&obj->mm.lock);
	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
	goto out_unlock;
}
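/*
 * Illustrative use of the mapping API (a sketch only; "data" and "size"
 * are stand-ins for the caller's payload):
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, size);
 *	__i915_gem_object_flush_map(obj, 0, size);
 *	i915_gem_object_unpin_map(obj);
 */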

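/*
 * Flush CPU writes made through a pinned mapping so the GPU can observe
 * them: a write barrier for already-coherent partners, plus a clflush of
 * the written range when the mapping is cacheable and the object is not
 * coherent for CPU writes.
 */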
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	wmb(); /* let all previous writes be visible to coherent partners */
	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!obj->mm.mapping);

	/*
	 * We allow removing the mapping from underneath pinned pages!
	 *
	 * Furthermore, since this is an unsafe operation reserved only
	 * for construction time manipulation, we ignore locking prudence.
	 */
	unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

	i915_gem_object_unpin_map(obj);
}

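/*
 * Return the scatterlist entry covering page (or dma page) n, together
 * with the offset of n inside that entry. Lookups are cached in a radix
 * tree; with @allow_alloc false the cache may be consulted but never
 * grown, so the walk degrades to a linear scan instead of allocating
 * tree nodes.
 */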
struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
			 struct i915_gem_object_page_iter *iter,
			 unsigned int n,
			 unsigned int *offset,
			 bool allow_alloc)
{
	const bool dma = iter == &obj->mm.get_dma_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	if (!allow_alloc)
		goto manual_lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookups of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	goto manual_walk;

manual_lookup:
	idx = 0;
	sg = obj->mm.pages->sgl;
	count = __sg_page_count(sg);

manual_walk:
	/*
	 * In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of a multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset, true);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

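/*
 * Return the DMA address of page n and, optionally, the number of bytes
 * remaining in that contiguous DMA segment from page n onwards.
 */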
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg_dma(obj, n, &offset, true);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}