/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

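/*
 * Install a freshly built sg_table as the backing store for @obj:
 * flush stale CPU cachelines, record which GTT page sizes the layout
 * can support and, if the object is shrinkable, expose it to the
 * shrinker via the purge/shrink lists.
 */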
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	int i;

	lockdep_assert_held(&obj->mm.lock);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;

	obj->mm.pages = pages;

	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(obj->mm.quirked);
		__i915_gem_object_pin_pages(obj);
		obj->mm.quirked = true;
	}

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	if (i915_gem_object_is_shrinkable(obj)) {
		struct list_head *list;
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}

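/*
 * Ask the backend's get_pages() hook to populate obj->mm.pages.
 * Purgeable objects (madv != WILLNEED) are rejected up front. Both
 * callers in this file hold obj->mm.lock around the call.
 */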
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(&i915->drm,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (err)
		return err;

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			goto unlock;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

/* Immediately discard the backing storage */
void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
	if (obj->ops->truncate)
		obj->ops->truncate(obj);
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->mm.lock);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	if (obj->ops->writeback)
		obj->ops->writeback(obj);
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	rcu_read_unlock();
}

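/* Undo i915_gem_object_map(): release the vmap, or the kmap of a single page. */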
static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vunmap(ptr);
	else
		kunmap(kmap_to_page(ptr));
}

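/*
 * Detach and return the object's sg_table without calling into the
 * backend: drop any kernel mapping, forget the cached sg lookup state
 * and pull the object back off the shrinker lists.
 */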
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	return pages;
}

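/*
 * Release the backing pages of an object that is no longer pinned,
 * handing the sg_table back to the backend's put_pages() hook.
 * Returns -EBUSY while the pages are still pinned.
 */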
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	/* May be called by shrinker from within get_pages() (on another bo) */
	mutex_lock(&obj->mm.lock);
	if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
		err = -EBUSY;
		goto unlock;
	}

	i915_gem_object_release_mmap_offset(obj);

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!pages && !i915_gem_object_needs_async_cancel(obj))
		pages = ERR_PTR(-EINVAL);

	if (!IS_ERR(pages))
		obj->ops->put_pages(obj, pages);

	err = 0;
unlock:
	mutex_unlock(&obj->mm.lock);

	return err;
}

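/*
 * Build a special CPU PTE for one page of an I/O aperture (memory
 * without a struct page, e.g. device local memory reached through its
 * BAR), used below when vmapping objects that lack struct pages.
 */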
static inline pte_t iomap_pte(resource_size_t base,
			      dma_addr_t offset,
			      pgprot_t prot)
{
	return pte_mkspecial(pfn_pte((base + offset) >> PAGE_SHIFT, prot));
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
				 enum i915_map_type type)
{
	unsigned long n_pte = obj->base.size >> PAGE_SHIFT;
	struct sg_table *sgt = obj->mm.pages;
	pte_t *stack[32], **mem;
	struct vm_struct *area;
	pgprot_t pgprot;

	if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC)
		return NULL;

	if (GEM_WARN_ON(type == I915_MAP_WC &&
			!static_cpu_has(X86_FEATURE_PAT)))
		return NULL;

	/* A single page can always be kmapped */
	if (n_pte == 1 && type == I915_MAP_WB) {
		struct page *page = sg_page(sgt->sgl);

		/*
		 * On 32b, highmem uses a finite set of indirect PTE (i.e.
		 * vmap) to provide virtual mappings of the high pages.
		 * As these are finite, map_new_virtual() must wait for some
		 * other kmap() to finish when it runs out. If we map a large
		 * number of objects, there is no method for it to tell us
		 * to release the mappings, and we deadlock.
		 *
		 * However, if we make an explicit vmap of the page, that
		 * uses a larger vmalloc arena, and also has the ability
		 * to tell us to release unwanted mappings. Most importantly,
		 * it will fail and propagate an error instead of waiting
		 * forever.
		 *
		 * So if the page is beyond the 32b boundary, make an explicit
		 * vmap. On 64b, this check will be optimised away as we can
		 * directly kmap any page on the system.
		 */
		if (!PageHighMem(page))
			return kmap(page);
	}

	mem = stack;
	if (n_pte > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
		if (!mem)
			return NULL;
	}

	area = alloc_vm_area(obj->base.size, mem);
	if (!area) {
		if (mem != stack)
			kvfree(mem);
		return NULL;
	}

	switch (type) {
	default:
		MISSING_CASE(type);
		fallthrough;	/* to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}

	if (i915_gem_object_has_struct_page(obj)) {
		struct sgt_iter iter;
		struct page *page;
		pte_t **ptes = mem;

		for_each_sgt_page(page, iter, sgt)
			**ptes++ = mk_pte(page, pgprot);
	} else {
		resource_size_t iomap;
		struct sgt_iter iter;
		pte_t **ptes = mem;
		dma_addr_t addr;

		iomap = obj->mm.region->iomap.base;
		iomap -= obj->mm.region->region.start;

		for_each_sgt_daddr(addr, iter, sgt)
			**ptes++ = iomap_pte(iomap, addr, pgprot);
	}

	if (mem != stack)
		kvfree(mem);

	return area->addr;
}

/* get, pin, and map the pages of the object into kernel space */
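/*
 * Illustrative usage only (not lifted from a real caller):
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	... write through vaddr ...
 *	__i915_gem_object_flush_map(obj, 0, obj->base.size);
 *	i915_gem_object_unpin_map(obj);
 */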
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	unsigned int flags;
	bool pinned;
	void *ptr;
	int err;

	flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
	if (!i915_gem_object_type_has(obj, flags))
		return ERR_PTR(-ENXIO);

	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (err)
		return ERR_PTR(err);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				goto err_unlock;

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			err = -EBUSY;
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		ptr = i915_gem_object_map(obj, type);
		if (!ptr) {
			err = -ENOMEM;
			goto err_unpin;
		}

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

out_unlock:
	mutex_unlock(&obj->mm.lock);
	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
	ptr = ERR_PTR(err);
	goto out_unlock;
}

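/*
 * Flush CPU writes made through a pinned mapping back to memory for
 * [offset, offset + size), clflushing only when the object is not
 * already coherent for CPU writes.
 */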
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	wmb(); /* let all previous writes be visible to coherent partners */
	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!obj->mm.mapping);

	/*
	 * We allow removing the mapping from underneath pinned pages!
	 *
	 * Furthermore, since this is an unsafe operation reserved only
	 * for construction time manipulation, we ignore locking prudence.
	 */
	unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

	i915_gem_object_unpin_map(obj);
}

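/*
 * Translate a page index within @obj into the scatterlist entry that
 * backs it; the page offset inside that entry is returned via @offset.
 * Lookups are cached in a radixtree so random access stays cheap.
 */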
struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookups of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of a multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

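/*
 * Return the DMA address of page @n of @obj; if @len is non-NULL, also
 * report how many contiguous bytes remain within the same sg entry.
 */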
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}