/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_dmabuf.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);

static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

static int
insert_mappable_node(struct drm_i915_private *i915,
                     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
						   size, 0, 0, 0,
						   i915->ggtt.mappable_end,
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_DEFAULT);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

	if (!i915_reset_in_progress(error))
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_in_progress(error),
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	} else {
		return 0;
	}
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = ggtt->base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}
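
/*
 * Userspace view of the ioctl above; an illustrative sketch only (assumes
 * libdrm's drmIoctl() wrapper), not code from this driver:
 *
 *	struct drm_i915_gem_get_aperture aperture = {0};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture) == 0)
 *		printf("%llu of %llu bytes available\n",
 *		       (unsigned long long)aperture.aper_available_size,
 *		       (unsigned long long)aperture.aper_size);
 */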

static int
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char *vaddr = obj->phys_handle->vaddr;
	struct sg_table *st;
	struct scatterlist *sg;
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(to_i915(obj->base.dev));

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = obj->phys_handle->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->pages = st;
	return 0;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
{
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (WARN_ON(ret)) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	if (obj->dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->dirty = 0;
	}

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Closed vma are removed from the obj->vma_list - but they may
	 * still have an active binding on the object. To remove those we
	 * must wait for all rendering to complete to the object (as unbinding
	 * must anyway), and retire the requests.
	 */
	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	i915_gem_retire_requests(to_i915(obj->base.dev));

	while ((vma = list_first_entry_or_null(&obj->vma_list,
					       struct i915_vma,
					       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
		if (ret)
			break;
	}
	list_splice(&still_in_list, &obj->vma_list);

	return ret;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 * @obj: i915 gem object
 * @readonly: waiting for just read access or read-write access
 */
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct reservation_object *resv;
	struct i915_gem_active *active;
	unsigned long active_mask;
	int idx;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (!readonly) {
		active = obj->last_read;
		active_mask = i915_gem_object_get_active(obj);
	} else {
		active_mask = 1;
		active = &obj->last_write;
	}

	for_each_active(active_mask, idx) {
		int ret;

		ret = i915_gem_active_wait(&active[idx],
					   &obj->base.dev->struct_mutex);
		if (ret)
			return ret;
	}

	resv = i915_gem_object_get_dmabuf_resv(obj);
	if (resv) {
		long err;

		err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
							  MAX_SCHEDULE_TIMEOUT);
		if (err < 0)
			return err;
	}

	return 0;
}

/* A nonblocking variant of the above wait. Must be called prior to
 * acquiring the mutex for the object, as the object state may change
 * during this call. A reference must be held by the caller for the object.
 */
static __must_check int
__unsafe_wait_rendering(struct drm_i915_gem_object *obj,
			struct intel_rps_client *rps,
			bool readonly)
{
	struct i915_gem_active *active;
	unsigned long active_mask;
	int idx;

	active_mask = __I915_BO_ACTIVE(obj);
	if (!active_mask)
		return 0;

	if (!readonly) {
		active = obj->last_read;
	} else {
		active_mask = 1;
		active = &obj->last_write;
	}

	for_each_active(active_mask, idx) {
		int ret;

		ret = i915_gem_active_wait_unlocked(&active[idx],
						    I915_WAIT_INTERRUPTIBLE,
						    NULL, rps);
		if (ret)
			return ret;
	}

	return 0;
}

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;

	return &fpriv->rps;
}

int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
			    int align)
{
	drm_dma_handle_t *phys;
	int ret;

	if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
			return -EBUSY;

		return 0;
	}

	if (obj->madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (obj->base.filp == NULL)
		return -EINVAL;

	ret = i915_gem_object_unbind(obj);
	if (ret)
		return ret;

	ret = i915_gem_object_put_pages(obj);
	if (ret)
		return ret;

	/* create a new object */
	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
	if (!phys)
		return -ENOMEM;

	obj->phys_handle = phys;
	obj->ops = &i915_gem_phys_ops;

	return i915_gem_object_get_pages(obj);
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int ret = 0;

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten) {
			ret = -EFAULT;
			goto out;
		}
	}

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(dev));

out:
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	kmem_cache_free(dev_priv->objects, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put_unlocked(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
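
/*
 * Worked example for the dumb-buffer math above (illustrative only): for a
 * 1920x1080 buffer at bpp=32, the pitch is ALIGN(1920 * 4, 64) = 7680 bytes
 * (already 64-byte aligned) and the size is 7680 * 1080 = 8294400 bytes,
 * which i915_gem_create() then rounds up to a whole number of pages.
 */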

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
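
/*
 * Userspace view of the create ioctl; a minimal sketch assuming libdrm's
 * drmIoctl() wrapper, not code from this file:
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *	handle = create.handle;
 *
 * On success create.handle names a page-sized GEM object, local to this
 * drm file and released with DRM_IOCTL_GEM_CLOSE.
 */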

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}
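
/*
 * Illustrative note on the XOR in the helpers above: bit-17 swizzling swaps
 * the two 64-byte halves of every 128-byte block, so the CPU copy must
 * target the mirrored cacheline. For example, gpu_offset 0x1010 maps to
 * 0x1010 ^ 64 = 0x1050, while 0x1050 maps back to 0x1010.
 */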

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    unsigned int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;

	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait_rendering(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're not in the cpu read domain, set ourself into the gtt
	 * read domain and manually flush cachelines (if required). This
	 * optimizes for the case when the gpu will dirty the data
	 * anyway again before the next pread happens.
	 */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);

	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, false);
		if (ret)
			goto err_unpin;

		*needs_clflush = 0;
	}

	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
				     unsigned int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're not in the cpu write domain, set ourself into the
	 * gtt write domain and manually flush cachelines (as required).
	 * This optimizes for the case when the gpu will use the data
	 * right away and we therefore have to clflush anyway.
	 */
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		*needs_clflush |= cpu_write_needs_clflush(obj) << 1;

	/* Same trick applies to invalidate partially written cachelines read
	 * before writing.
	 */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
							 obj->cache_level);

	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, true);
		if (ret)
			goto err_unpin;

		*needs_clflush = 0;
	}

	if ((*needs_clflush & CLFLUSH_AFTER) == 0)
		obj->cache_dirty = true;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	obj->dirty = 1;
	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}
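
/*
 * Worked example (illustrative only): flushing [0x1020, 0x1060) with
 * swizzling enabled rounds out to [0x1000, 0x1080), covering both 64-byte
 * channels of each 128-byte pair.
 */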

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static inline unsigned long
slow_user_access(struct io_mapping *mapping,
		 uint64_t page_base, int page_offset,
		 char __user *user_data,
		 unsigned long length, bool pwrite)
{
	void __iomem *ioaddr;
	void *vaddr;
	uint64_t unwritten;

	ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)ioaddr + page_offset;
	if (pwrite)
		unwritten = __copy_from_user(vaddr, user_data, length);
	else
		unwritten = __copy_to_user(user_data, vaddr, length);

	io_mapping_unmap(ioaddr);
	return unwritten;
}

static int
i915_gem_gtt_pread(struct drm_device *dev,
		   struct drm_i915_gem_object *obj, uint64_t size,
		   uint64_t data_offset, uint64_t data_ptr)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_vma *vma;
	struct drm_mm_node node;
	char __user *user_data;
	uint64_t remain;
	uint64_t offset;
	int ret;

	intel_runtime_pm_get(to_i915(dev));
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
		if (ret)
			goto out;

		ret = i915_gem_object_get_pages(obj);
		if (ret) {
			remove_mappable_node(&node);
			goto out;
		}

		i915_gem_object_pin_pages(obj);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret)
		goto out_unpin;

	user_data = u64_to_user_ptr(data_ptr);
	remain = size;
	offset = data_offset;

	mutex_unlock(&dev->struct_mutex);
	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_writeable(user_data, remain);
		if (ret) {
			mutex_lock(&dev->struct_mutex);
			goto out_unpin;
		}
	}

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start,
					       I915_CACHE_NONE, 0);
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* This is a slow read/write as it tries to read from
		 * and write to user memory which may result into page
		 * faults, and so we cannot perform this under struct_mutex.
		 */
		if (slow_user_access(&ggtt->mappable, page_base,
				     page_offset, user_data,
				     page_length, false)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	mutex_lock(&dev->struct_mutex);
	if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
		/* The user has modified the object whilst we tried
		 * reading from it, and we now have no idea what domain
		 * the pages should be in. As we have just been touching
		 * them directly, flush everything back to the GTT
		 * domain.
		 */
		ret = i915_gem_object_set_to_gtt_domain(obj, false);
	}

out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size);
		i915_gem_object_unpin_pages(obj);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out:
	intel_runtime_pm_put(to_i915(dev));
	return ret;
}

static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_obj_finish_shmem_access(obj);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source.  */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = __unsafe_wait_rendering(obj, to_rps_client(file), true);
	if (ret)
		goto err;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err;

	ret = i915_gem_shmem_pread(dev, obj, args, file);

	/* pread for non shmem backed objects */
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(dev, obj, args->size,
					args->offset, args->data_ptr);

	i915_gem_object_put(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;

err:
	i915_gem_object_put_unlocked(obj);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @i915: i915 device private data
 * @obj: i915 gem object
 * @args: pwrite arguments structure
 * @file: drm file pointer
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_device *dev = obj->base.dev;
	struct i915_vma *vma;
	struct drm_mm_node node;
	uint64_t remain, offset;
	char __user *user_data;
	int ret;
	bool hit_slow_path = false;

	if (i915_gem_object_is_tiled(obj))
		return -EFAULT;

	intel_runtime_pm_get(i915);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE | PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(i915, &node, PAGE_SIZE);
		if (ret)
			goto out;

		ret = i915_gem_object_get_pages(obj);
		if (ret) {
			remove_mappable_node(&node);
			goto out;
		}

		i915_gem_object_pin_pages(obj);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	obj->dirty = true;

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (fast_user_write(&ggtt->mappable, page_base,
				    page_offset, user_data, page_length)) {
			hit_slow_path = true;
			mutex_unlock(&dev->struct_mutex);
			if (slow_user_access(&ggtt->mappable,
					     page_base,
					     page_offset, user_data,
					     page_length, true)) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto out_flush;
			}

			mutex_lock(&dev->struct_mutex);
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_flush:
	if (hit_slow_path) {
		if (ret == 0 &&
		    (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
			/* The user has modified the object whilst we tried
			 * reading from it, and we now have no idea what domain
			 * the pages should be in. As we have just been touching
			 * them directly, flush everything back to the GTT
			 * domain.
			 */
			ret = i915_gem_object_set_to_gtt_domain(obj, false);
		}
	}

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size);
		i915_gem_object_unpin_pages(obj);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out:
	intel_runtime_pm_put(i915);
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
					user_data, page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	unsigned int needs_clflush;
	struct sg_page_iter sg_iter;

	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
	if (ret)
		return ret;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire patch. */
		partial_cacheline_write = needs_clflush & CLFLUSH_BEFORE &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush & CLFLUSH_AFTER);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush & CLFLUSH_AFTER);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_obj_finish_shmem_access(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!(needs_clflush & CLFLUSH_AFTER) &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				needs_clflush |= CLFLUSH_AFTER;
		}
	}

	if (needs_clflush & CLFLUSH_AFTER)
		i915_gem_chipset_flush(to_i915(dev));

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}
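
/*
 * Worked example for partial_cacheline_write above (illustrative only):
 * with a 64-byte clflush size, an 80-byte copy at page offset 16 gives
 * (16 | 80) & 63 = 16, which is non-zero, so the touched cachelines are
 * flushed before the copy to avoid mixing stale data with the new bytes.
 */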

/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = __unsafe_wait_rendering(obj, to_rps_client(file), false);
	if (ret)
		goto err;

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err_rpm;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
	}

	i915_gem_object_put(obj);
	mutex_unlock(&dev->struct_mutex);
	intel_runtime_pm_put(dev_priv);

	return ret;

err_rpm:
	intel_runtime_pm_put(dev_priv);
err:
	i915_gem_object_put_unlocked(obj);
	return ret;
}

static inline enum fb_op_origin
write_origin(struct drm_i915_gem_object *obj, unsigned domain)
{
	return (domain == I915_GEM_DOMAIN_GTT ?
		obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	ret = __unsafe_wait_rendering(obj, to_rps_client(file), !write_domain);
	if (ret)
		goto err;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err;

	if (read_domains & I915_GEM_DOMAIN_GTT)
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
	else
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);

	if (write_domain != 0)
		intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));

	i915_gem_object_put(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;

err:
	i915_gem_object_put_unlocked(obj);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int err = 0;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Pinned buffers may be scanout, so flush the cache */
	if (READ_ONCE(obj->pin_display)) {
		err = i915_mutex_lock_interruptible(dev);
		if (!err) {
			i915_gem_object_flush_cpu_write_domain(obj);
			mutex_unlock(&dev->struct_mutex);
		}
	}

	i915_gem_object_put_unlocked(obj);
	return err;
}

/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->base.filp) {
		i915_gem_object_put_unlocked(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (down_write_killable(&mm->mmap_sem)) {
			i915_gem_object_put_unlocked(obj);
			return -EINTR;
		}
		vma = find_vma(mm, addr);
		if (vma)
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		up_write(&mm->mmap_sem);

		/* This may race, but that's ok, it only gets set */
		WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
	}
	i915_gem_object_put_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
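
/*
 * Userspace view of the ioctl above; a minimal sketch assuming libdrm:
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = handle,
 *		.size = length,
 *		.flags = I915_MMAP_WC,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
 *		ptr = (void *)(uintptr_t)arg.addr_ptr;
 */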

static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
{
	u64 size;

	size = i915_gem_object_get_stride(obj);
	size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8;

	return size >> PAGE_SHIFT;
}

/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     single byte.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * Restrictions:
 *
 *  * snoopable objects cannot be accessed via the GTT. It can cause machine
 *    hangs on some architectures, corruption on others. An attempt to service
 *    a GTT page fault from a snoopable object will generate a SIGBUS.
 *
 *  * the object must be able to fit into RAM (physical memory, though no
 *    limited to the mappable aperture).
 *
 *
 * Caveats:
 *
 *  * a new GTT page fault will synchronize rendering from the GPU and flush
 *    all data to system memory. Subsequent access will not be synchronized.
 *
 *  * all mappings are revoked on runtime device suspend.
 *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require fence register for display and blitter access
 *    as well). Contention of the fence registers will cause the previous users
 *    to be unmapped and any new access will generate new page faults.
 *
 *  * running out of memory while servicing a fault may generate a SIGBUS,
 *    rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 1;
}
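
/*
 * Userspace can discover this via I915_PARAM_MMAP_GTT_VERSION; a minimal
 * sketch, again assuming libdrm:
 *
 *	int version = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_MMAP_GTT_VERSION,
 *		.value = &version,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */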

/**
 * i915_gem_fault - fault a page into the GTT
C
Chris Wilson 已提交
1734
 * @area: CPU VMA in question
1735
 * @vmf: fault info
1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746
 *
 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 *
 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
 */
int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
	struct i915_vma *vma;
	pgoff_t page_offset;
	unsigned int flags;
	int ret;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
		PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Try to flush the object off the GPU first without holding the lock.
	 * Upon acquiring the lock, we will perform our sanity checks and then
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
	ret = __unsafe_wait_rendering(obj, NULL, !write);
	if (ret)
		goto err;

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err_rpm;

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
		ret = -EFAULT;
		goto err_unlock;
	}

	/* If the object is smaller than a couple of partial vma, it is
	 * not worth only creating a single partial vma - we may as well
	 * clear enough space for the full object.
	 */
	flags = PIN_MAPPABLE;
	if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
		flags |= PIN_NONBLOCK | PIN_NONFAULT;

	/* Now pin it into the GTT as needed */
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
	if (IS_ERR(vma)) {
		struct i915_ggtt_view view;
		unsigned int chunk_size;

		/* Use a partial view if it is bigger than available space */
		chunk_size = MIN_CHUNK_PAGES;
		if (i915_gem_object_is_tiled(obj))
			chunk_size = max(chunk_size, tile_row_pages(obj));

		memset(&view, 0, sizeof(view));
		view.type = I915_GGTT_VIEW_PARTIAL;
		view.params.partial.offset = rounddown(page_offset, chunk_size);
		view.params.partial.size =
			min_t(unsigned int, chunk_size,
			      vma_pages(area) - view.params.partial.offset);

		/* If the partial covers the entire object, just create a
		 * normal VMA.
		 */
		if (chunk_size >= obj->base.size >> PAGE_SHIFT)
			view.type = I915_GGTT_VIEW_NORMAL;

		/* Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */
		obj->frontbuffer_ggtt_origin = ORIGIN_CPU;

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unlock;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto err_unpin;

	ret = i915_vma_get_fence(vma);
	if (ret)
		goto err_unpin;

	/* Mark as being mmapped into userspace for later revocation */
	assert_rpm_wakelock_held(dev_priv);
	spin_lock(&dev_priv->mm.userfault_lock);
	if (list_empty(&obj->userfault_link))
		list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
	spin_unlock(&dev_priv->mm.userfault_lock);

	/* Finally, remap it using the new GTT offset */
	ret = remap_io_mapping(area,
			       area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
			       (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->mappable);

err_unpin:
	__i915_vma_unpin(vma);
err_unlock:
	mutex_unlock(&dev->struct_mutex);
err_rpm:
	intel_runtime_pm_put(dev_priv);
err:
	switch (ret) {
	case -EIO:
		/*
		 * We eat errors when the gpu is terminally wedged to avoid
		 * userspace unduly crashing (gl has no provisions for mmaps to
		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
		 * and so needs to be reported.
		 */
		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}
	case -EAGAIN:
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
		 */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		ret = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		ret = VM_FAULT_OOM;
		break;
	case -ENOSPC:
	case -EFAULT:
		ret = VM_FAULT_SIGBUS;
		break;
	default:
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
		ret = VM_FAULT_SIGBUS;
		break;
	}
	return ret;
}
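
/*
 * Illustrative sketch (assumed wiring, not defined in this file): the fault
 * handler above is reached through the vm_operations_struct that the driver
 * installs for GEM mmaps, roughly:
 *
 *	static const struct vm_operations_struct i915_gem_vm_ops = {
 *		.fault = i915_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 *
 * drm_gem_mmap() points vma->vm_ops at such a table, so the first touch of
 * a GTT mmap lands in i915_gem_fault().
 */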

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	bool zap = false;

	/* Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 *
	 * Note that RPM complicates this somewhat by adding an additional
	 * requirement that operations to the GGTT be made while holding the
	 * RPM wakeref.
	 */
	lockdep_assert_held(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	spin_lock(&i915->mm.userfault_lock);
	if (!list_empty(&obj->userfault_link)) {
		list_del_init(&obj->userfault_link);
		zap = true;
	}
	spin_unlock(&i915->mm.userfault_lock);
	if (!zap)
		goto out;

	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);

	/* Ensure that the CPU's PTE are revoked and there are not outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied by changing the PTE *should* be sufficient, an
	 * extra barrier here just provides us with a bit of paranoid
	 * documentation about our requirement to serialise memory writes
	 * before touching registers / GSM.
	 */
	wmb();

out:
	intel_runtime_pm_put(i915);
}

void
i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;

	spin_lock(&dev_priv->mm.userfault_lock);
	while ((obj = list_first_entry_or_null(&dev_priv->mm.userfault_list,
					       struct drm_i915_gem_object,
					       userfault_link))) {
		list_del_init(&obj->userfault_link);
		spin_unlock(&dev_priv->mm.userfault_lock);

		drm_vma_node_unmap(&obj->base.vma_node,
				   obj->base.dev->anon_inode->i_mapping);

		spin_lock(&dev_priv->mm.userfault_lock);
	}
	spin_unlock(&dev_priv->mm.userfault_lock);
}

/**
 * i915_gem_get_ggtt_size - return required global GTT size for an object
 * @dev_priv: i915 device
 * @size: object size
 * @tiling_mode: tiling mode
 *
 * Return the required global GTT size for an object, taking into account
 * potential fence register mapping.
 */
u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
			   u64 size, int tiling_mode)
{
	u64 ggtt_size;

	GEM_BUG_ON(size == 0);

	if (INTEL_GEN(dev_priv) >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (IS_GEN3(dev_priv))
		ggtt_size = 1024*1024;
	else
		ggtt_size = 512*1024;

	while (ggtt_size < size)
		ggtt_size <<= 1;

	return ggtt_size;
}
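
/*
 * Worked example (illustrative): a 1.5 MiB X-tiled object on gen3 needs a
 * power-of-two fence region, so the loop above starts at 1 MiB and doubles
 * once, returning 2 MiB. On gen4+, or for untiled objects, the object size
 * is returned unchanged.
 */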

/**
 * i915_gem_get_ggtt_alignment - return required global GTT alignment
 * @dev_priv: i915 device
 * @size: object size
 * @tiling_mode: tiling mode
 * @fenced: is fenced alignment required or not
 *
 * Return the required global GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
				int tiling_mode, bool fenced)
{
	GEM_BUG_ON(size == 0);

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
}
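
/*
 * Worked example (illustrative): a 700 KiB tiled object on gen2 with fenced
 * access inherits the fence sizing above, 512 KiB doubled once, so it must
 * be aligned to 1 MiB. The same object on gen4+ only needs the 4096 byte
 * GTT page alignment.
 */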

static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	int err;

	err = drm_gem_create_mmap_offset(&obj->base);
	if (!err)
		return 0;

	/* We can idle the GPU locklessly to flush stale objects, but in order
	 * to claim that space for ourselves, we need to take the big
	 * struct_mutex to free the requests+objects and allocate our slot.
	 */
	err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
	if (err)
		return err;

	err = i915_mutex_lock_interruptible(&dev_priv->drm);
	if (!err) {
		i915_gem_retire_requests(dev_priv);
		err = drm_gem_create_mmap_offset(&obj->base);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	return err;
}

static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
}

int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	ret = i915_gem_object_create_mmap_offset(obj);
	if (ret == 0)
		*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

	i915_gem_object_put_unlocked(obj);
	return ret;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
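
/*
 * Userspace flow (illustrative sketch, error handling omitted): fetch the
 * fake offset via the ioctl, then pass it to mmap() on the DRM fd:
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, fd, arg.offset);
 *
 * The first access through ptr then faults into i915_gem_fault() above.
 */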

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.filp == NULL)
		return;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->madv = __I915_MADV_PURGED;
}

/* Try to discard unwanted pages */
static void
i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;

	switch (obj->madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
		/* fall through - once truncated, the object is purged */
	case __I915_MADV_PURGED:
		return;
	}

	if (obj->base.filp == NULL)
		return;

	mapping = obj->base.filp->f_mapping;
	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}

static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct sgt_iter sgt_iter;
	struct page *page;
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (WARN_ON(ret)) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		i915_gem_clflush_object(obj, true);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	i915_gem_gtt_finish_object(obj);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	for_each_sgt_page(page, sgt_iter, obj->pages) {
		if (obj->dirty)
			set_page_dirty(page);

		if (obj->madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		put_page(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

int
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	const struct drm_i915_gem_object_ops *ops = obj->ops;

	if (obj->pages == NULL)
		return 0;

	if (obj->pages_pin_count)
		return -EBUSY;

	GEM_BUG_ON(obj->bind_count);

	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early. */
	list_del(&obj->global_list);

	if (obj->mapping) {
		void *ptr;

		ptr = ptr_mask_bits(obj->mapping);
		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
		else
			kunmap(kmap_to_page(ptr));

		obj->mapping = NULL;
	}

	ops->put_pages(obj);
	obj->pages = NULL;

	i915_gem_object_invalidate(obj);

	return 0;
}

static unsigned int swiotlb_max_size(void)
{
#if IS_ENABLED(CONFIG_SWIOTLB)
	return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
#else
	return 0;
#endif
}

static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	int page_count, i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sgt_iter sgt_iter;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	unsigned int max_segment;
	int ret;
	gfp_t gfp;

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	max_segment = swiotlb_max_size();
	if (!max_segment)
		max_segment = rounddown(UINT_MAX, PAGE_SIZE);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	page_count = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping = obj->base.filp->f_mapping;
	gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
	gfp |= __GFP_NORETRY | __GFP_NOWARN;
	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		if (IS_ERR(page)) {
			i915_gem_shrink(dev_priv,
					page_count,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND |
					I915_SHRINK_PURGEABLE);
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		}
		if (IS_ERR(page)) {
			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 */
			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto err_pages;
			}
		}
		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
	}
	if (sg) /* loop terminated early; short sg table */
		sg_mark_end(sg);
	obj->pages = st;

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret)
		goto err_pages;

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj);

	if (i915_gem_object_is_tiled(obj) &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		i915_gem_object_pin_pages(obj);

	return 0;

err_pages:
	sg_mark_end(sg);
	for_each_sgt_page(page, sgt_iter, st)
		put_page(page);
	sg_free_table(st);
	kfree(st);

	/* shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_get_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_put_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int
i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	const struct drm_i915_gem_object_ops *ops = obj->ops;
	int ret;

	if (obj->pages)
		return 0;

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	BUG_ON(obj->pages_pin_count);

	ret = ops->get_pages(obj);
	if (ret)
		return ret;

	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);

	obj->get_page.sg = obj->pages->sgl;
	obj->get_page.last = 0;

	return 0;
}
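
/*
 * Illustrative caller pattern (a sketch, not a prescribed API): users that
 * need the backing store to stay resident hold it via the pin count rather
 * than pairing get/put directly:
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret)
 *		return ret;
 *	i915_gem_object_pin_pages(obj);    (blocks put_pages underneath us)
 *	... use obj->pages ...
 *	i915_gem_object_unpin_pages(obj);  (allows the shrinker to reap again)
 */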

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
				 enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
	struct sg_table *sgt = obj->pages;
	struct sgt_iter sgt_iter;
	struct page *page;
	struct page *stack_pages[32];
	struct page **pages = stack_pages;
	unsigned long i = 0;
	pgprot_t pgprot;
	void *addr;

	/* A single page can always be kmapped */
	if (n_pages == 1 && type == I915_MAP_WB)
		return kmap(sg_page(sgt->sgl));

	if (n_pages > ARRAY_SIZE(stack_pages)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
		if (!pages)
			return NULL;
	}

	for_each_sgt_page(page, sgt_iter, sgt)
		pages[i++] = page;

	/* Check that we have the expected number of pages */
	GEM_BUG_ON(i != n_pages);

	switch (type) {
	case I915_MAP_WB:
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}
	addr = vmap(pages, n_pages, 0, pgprot);

	if (pages != stack_pages)
		drm_free_large(pages);

	return addr;
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);
	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ERR_PTR(ret);

	i915_gem_object_pin_pages(obj);
	pinned = obj->pages_pin_count > 1;

	ptr = ptr_unpack_bits(obj->mapping, has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			ret = -EBUSY;
			goto err;
		}

		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
		else
			kunmap(kmap_to_page(ptr));

		ptr = obj->mapping = NULL;
	}

	if (!ptr) {
		ptr = i915_gem_object_map(obj, type);
		if (!ptr) {
			ret = -ENOMEM;
			goto err;
		}

		obj->mapping = ptr_pack_bits(ptr, type);
	}

	return ptr;

err:
	i915_gem_object_unpin_pages(obj);
	return ERR_PTR(ret);
}
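
/*
 * Usage sketch (illustrative): pin_map yields a kernel CPU view of the
 * object, e.g. for filling a ring or context image:
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);
 *	i915_gem_object_unpin_map(obj);
 *
 * The mapping is cached in obj->mapping; requesting a different map type
 * while another user still holds a pin fails with -EBUSY, as above.
 */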

static void
i915_gem_object_retire__write(struct i915_gem_active *active,
			      struct drm_i915_gem_request *request)
{
	struct drm_i915_gem_object *obj =
		container_of(active, struct drm_i915_gem_object, last_write);

	intel_fb_obj_flush(obj, true, ORIGIN_CS);
}

static void
i915_gem_object_retire__read(struct i915_gem_active *active,
			     struct drm_i915_gem_request *request)
{
	int idx = request->engine->id;
	struct drm_i915_gem_object *obj =
		container_of(active, struct drm_i915_gem_object, last_read[idx]);

	GEM_BUG_ON(!i915_gem_object_has_active_engine(obj, idx));

	i915_gem_object_clear_active(obj, idx);
	if (i915_gem_object_is_active(obj))
		return;

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	if (obj->bind_count)
		list_move_tail(&obj->global_list,
			       &request->i915->mm.bound_list);

	i915_gem_object_put(obj);
}

static bool i915_context_is_banned(const struct i915_gem_context *ctx)
{
	unsigned long elapsed;

	if (ctx->hang_stats.banned)
		return true;

	elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
	if (ctx->hang_stats.ban_period_seconds &&
	    elapsed <= ctx->hang_stats.ban_period_seconds) {
		DRM_DEBUG("context hanging too fast, banning!\n");
		return true;
	}

	return false;
}

static void i915_set_reset_status(struct i915_gem_context *ctx,
				  const bool guilty)
{
	struct i915_ctx_hang_stats *hs = &ctx->hang_stats;

	if (guilty) {
		hs->banned = i915_context_is_banned(ctx);
		hs->batch_active++;
		hs->guilty_ts = get_seconds();
	} else {
		hs->batch_pending++;
	}
}

struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request;

	/* We are called by the error capture and reset at a random
	 * point in time. In particular, note that neither is crucially
	 * ordered with an interrupt. After a hang, the GPU is dead and we
	 * assume that no more writes can happen (we waited long enough for
	 * all writes that were in transaction to be flushed) - adding an
	 * extra delay for a recent interrupt is pointless. Hence, we do
	 * not need an engine->irq_seqno_barrier() before the seqno reads.
	 */
	list_for_each_entry(request, &engine->request_list, link) {
		if (i915_gem_request_completed(request))
			continue;

		if (!i915_sw_fence_done(&request->submit))
			break;

		return request;
	}

	return NULL;
}

static void reset_request(struct drm_i915_gem_request *request)
{
	void *vaddr = request->ring->vaddr;
	u32 head;

	/* As this request likely depends on state from the lost
	 * context, clear out all the user operations leaving the
	 * breadcrumb at the end (so we get the fence notifications).
	 */
	head = request->head;
	if (request->postfix < head) {
		memset(vaddr + head, 0, request->ring->size - head);
		head = 0;
	}
	memset(vaddr + head, 0, request->postfix - head);
}

static void i915_gem_reset_engine(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request;
	struct i915_gem_context *incomplete_ctx;
	bool ring_hung;

	if (engine->irq_seqno_barrier)
		engine->irq_seqno_barrier(engine);

	request = i915_gem_find_active_request(engine);
	if (!request)
		return;

	ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
	if (engine->hangcheck.seqno != intel_engine_get_seqno(engine))
		ring_hung = false;

	i915_set_reset_status(request->ctx, ring_hung);
	if (!ring_hung)
		return;

	DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
			 engine->name, request->fence.seqno);

	/* Setup the CS to resume from the breadcrumb of the hung request */
	engine->reset_hw(engine, request);

	/* Users of the default context do not rely on logical state
	 * preserved between batches. They have to emit full state on
	 * every batch and so it is safe to execute queued requests following
	 * the hang.
	 *
	 * Other contexts preserve state, now corrupt. We want to skip all
	 * queued requests that reference the corrupt context.
	 */
	incomplete_ctx = request->ctx;
	if (i915_gem_context_is_default(incomplete_ctx))
		return;

	list_for_each_entry_continue(request, &engine->request_list, link)
		if (request->ctx == incomplete_ctx)
			reset_request(request);
}

void i915_gem_reset(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	i915_gem_retire_requests(dev_priv);

	for_each_engine(engine, dev_priv, id)
		i915_gem_reset_engine(engine);

	i915_gem_restore_fences(&dev_priv->drm);

	if (dev_priv->gt.awake) {
		intel_sanitize_gt_powersave(dev_priv);
		intel_enable_gt_powersave(dev_priv);
		if (INTEL_GEN(dev_priv) >= 6)
			gen6_rps_busy(dev_priv);
	}
}

static void nop_submit_request(struct drm_i915_gem_request *request)
{
}

static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
{
	engine->submit_request = nop_submit_request;

	/* Mark all pending requests as complete so that any concurrent
	 * (lockless) lookup doesn't try and wait upon the request as we
	 * reset it.
	 */
	intel_engine_init_seqno(engine, engine->last_submitted_seqno);

	/*
	 * Clear the execlists queue up before freeing the requests, as those
	 * are the ones that keep the context and ringbuffer backing objects
	 * pinned in place.
	 */

	if (i915.enable_execlists) {
		spin_lock(&engine->execlist_lock);
		INIT_LIST_HEAD(&engine->execlist_queue);
		i915_gem_request_put(engine->execlist_port[0].request);
		i915_gem_request_put(engine->execlist_port[1].request);
		memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
		spin_unlock(&engine->execlist_lock);
	}

	engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
}

void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);
	set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);

	i915_gem_context_lost(dev_priv);
	for_each_engine(engine, dev_priv, id)
		i915_gem_cleanup_engine(engine);
	mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);

	i915_gem_retire_requests(dev_priv);
}

static void
i915_gem_retire_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), gt.retire_work.work);
	struct drm_device *dev = &dev_priv->drm;

	/* Come back later if the device is busy... */
	if (mutex_trylock(&dev->struct_mutex)) {
		i915_gem_retire_requests(dev_priv);
		mutex_unlock(&dev->struct_mutex);
	}

	/* Keep the retire handler running until we are finally idle.
	 * We do not need to do this test under locking as in the worst-case
	 * we queue the retire worker once too often.
	 */
	if (READ_ONCE(dev_priv->gt.awake)) {
		i915_queue_hangcheck(dev_priv);
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->gt.retire_work,
				   round_jiffies_up_relative(HZ));
	}
}

static void
i915_gem_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), gt.idle_work.work);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	bool rearm_hangcheck;

	if (!READ_ONCE(dev_priv->gt.awake))
		return;

	if (READ_ONCE(dev_priv->gt.active_engines))
		return;

	rearm_hangcheck =
		cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);

	if (!mutex_trylock(&dev->struct_mutex)) {
		/* Currently busy, come back later */
		mod_delayed_work(dev_priv->wq,
				 &dev_priv->gt.idle_work,
				 msecs_to_jiffies(50));
		goto out_rearm;
	}

	if (dev_priv->gt.active_engines)
		goto out_unlock;

	for_each_engine(engine, dev_priv, id)
		i915_gem_batch_pool_fini(&engine->batch_pool);

	GEM_BUG_ON(!dev_priv->gt.awake);
	dev_priv->gt.awake = false;
	rearm_hangcheck = false;

	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_idle(dev_priv);
	intel_runtime_pm_put(dev_priv);
out_unlock:
	mutex_unlock(&dev->struct_mutex);

out_rearm:
	if (rearm_hangcheck) {
		GEM_BUG_ON(!dev_priv->gt.awake);
		i915_queue_hangcheck(dev_priv);
	}
}

void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem);
	struct drm_i915_file_private *fpriv = file->driver_priv;
	struct i915_vma *vma, *vn;

	mutex_lock(&obj->base.dev->struct_mutex);
	list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
		if (vma->vm->file == fpriv)
			i915_vma_close(vma);
	mutex_unlock(&obj->base.dev->struct_mutex);
}

/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * Returns 0 if successful, else an error is returned with the remaining time in
 * the timeout parameter.
 *  -ETIME: object is still busy after timeout
 *  -ERESTARTSYS: signal interrupted the wait
 *  -ENOENT: object doesn't exist
 * Also possible, but rare:
 *  -EAGAIN: GPU wedged
 *  -ENOMEM: damn
 *  -ENODEV: Internal IRQ fail
 *  -E?: The add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 * without holding struct_mutex the object may become re-busied before this
 * function completes. A similar but shorter race condition exists in the busy
 * ioctl.
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_wait *args = data;
	struct intel_rps_client *rps = to_rps_client(file);
	struct drm_i915_gem_object *obj;
	unsigned long active;
	int idx, ret = 0;

	if (args->flags != 0)
		return -EINVAL;

	obj = i915_gem_object_lookup(file, args->bo_handle);
	if (!obj)
		return -ENOENT;

	active = __I915_BO_ACTIVE(obj);
	for_each_active(active, idx) {
		s64 *timeout = args->timeout_ns >= 0 ? &args->timeout_ns : NULL;

		ret = i915_gem_active_wait_unlocked(&obj->last_read[idx],
						    I915_WAIT_INTERRUPTIBLE,
						    timeout, rps);
		if (ret)
			break;
	}

	i915_gem_object_put_unlocked(obj);
	return ret;
}
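
/*
 * Userspace sketch (illustrative): wait up to 100ms for an object to go
 * idle; on return the kernel writes back the time remaining:
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 100 * 1000 * 1000,
 *	};
 *	ret = ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 */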

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

int i915_vma_unbind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	unsigned long active;
	int ret;

	/* First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	active = i915_vma_get_active(vma);
	if (active) {
		int idx;

		/* When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 */
		__i915_vma_pin(vma);

		for_each_active(active, idx) {
			ret = i915_gem_active_retire(&vma->last_read[idx],
						   &vma->vm->dev->struct_mutex);
			if (ret)
				break;
		}

		__i915_vma_unpin(vma);
		if (ret)
			return ret;

		GEM_BUG_ON(i915_vma_is_active(vma));
	}

	if (i915_vma_is_pinned(vma))
		return -EBUSY;

	if (!drm_mm_node_allocated(&vma->node))
		goto destroy;

	GEM_BUG_ON(obj->bind_count == 0);
	GEM_BUG_ON(!obj->pages);

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* release the fence reg _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_gem_release_mmap(obj);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->vm->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

	if (vma->pages != obj->pages) {
		GEM_BUG_ON(!vma->pages);
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
	vma->pages = NULL;

	/* Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	if (--obj->bind_count == 0)
		list_move_tail(&obj->global_list,
			       &to_i915(obj->base.dev)->mm.unbound_list);

	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
	 * reaped by the shrinker.
	 */
	i915_gem_object_unpin_pages(obj);

destroy:
	if (unlikely(i915_vma_is_closed(vma)))
		i915_vma_destroy(vma);

	return 0;
}

int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
			   unsigned int flags)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	for_each_engine(engine, dev_priv, id) {
		if (engine->last_context == NULL)
			continue;

		ret = intel_engine_idle(engine, flags);
		if (ret)
			return ret;
	}

	return 0;
}

static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
				     unsigned long cache_level)
{
	struct drm_mm_node *gtt_space = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	if (!drm_mm_node_allocated(gtt_space))
		return true;

	if (list_empty(&gtt_space->node_list))
		return true;

	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
	if (other->allocated && !other->hole_follows && other->color != cache_level)
		return false;

	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
		return false;

	return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
	struct drm_i915_gem_object *obj = vma->obj;
	u64 start, end;
	int ret;

	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	if (flags & PIN_MAPPABLE)
		size = i915_gem_get_ggtt_size(dev_priv, size,
					      i915_gem_object_get_tiling(obj));

	alignment = max(max(alignment, vma->display_alignment),
			i915_gem_get_ggtt_alignment(dev_priv, size,
						    i915_gem_object_get_tiling(obj),
						    flags & PIN_MAPPABLE));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
			  size, obj->base.size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -E2BIG;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (offset & (alignment - 1) || offset > end - size) {
			ret = -EINVAL;
			goto err_unpin;
		}

		vma->node.start = offset;
		vma->node.size = size;
		vma->node.color = obj->cache_level;
		ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
		if (ret) {
			ret = i915_gem_evict_for_vma(vma);
			if (ret == 0)
				ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
			if (ret)
				goto err_unpin;
		}
	} else {
		u32 search_flag, alloc_flag;

		if (flags & PIN_HIGH) {
			search_flag = DRM_MM_SEARCH_BELOW;
			alloc_flag = DRM_MM_CREATE_TOP;
		} else {
			search_flag = DRM_MM_SEARCH_DEFAULT;
			alloc_flag = DRM_MM_CREATE_DEFAULT;
		}

		/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
		 * so we know that we always have a minimum alignment of 4096.
		 * The drm_mm range manager is optimised to return results
		 * with zero alignment, so where possible use the optimal
		 * path.
		 */
		if (alignment <= 4096)
			alignment = 0;

search_free:
		ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
							  &vma->node,
							  size, alignment,
							  obj->cache_level,
							  start, end,
							  search_flag,
							  alloc_flag);
		if (ret) {
			ret = i915_gem_evict_something(vma->vm, size, alignment,
						       obj->cache_level,
						       start, end,
						       flags);
			if (ret == 0)
				goto search_free;

			goto err_unpin;
		}

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));

	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	obj->bind_count++;

	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}
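
/*
 * Illustrative example (hypothetical caller): a fixed GGTT placement is
 * requested by encoding the offset into the pin flags; the PIN_OFFSET_FIXED
 * branch above then reserves exactly that node, evicting any overlap:
 *
 *	vma = i915_gem_object_ggtt_pin(obj, NULL, size, 0,
 *				       PIN_GLOBAL | PIN_OFFSET_FIXED |
 *				       (offset & PIN_OFFSET_MASK));
 */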

bool
i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			bool force)
{
	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj->pages == NULL)
		return false;

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 */
	if (obj->stolen || obj->phys_handle)
		return false;

	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines.  However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated.  As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
		obj->cache_dirty = true;
		return false;
	}

	trace_i915_gem_object_clflush(obj);
	drm_clflush_sg(obj->pages);
	obj->cache_dirty = false;

	return true;
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain.  Writes
	 * to it "immediately" go to main memory as far as we know, so there's
	 * no chipset flush.  It also doesn't land in render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour).
	 */
	wmb();
	if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
		POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));

	intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));

	obj->base.write_domain = 0;
	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    I915_GEM_DOMAIN_GTT);
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		return;

	if (i915_gem_clflush_object(obj, obj->pin_display))
		i915_gem_chipset_flush(to_i915(obj->base.dev));

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);

	obj->base.write_domain = 0;
	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    I915_GEM_DOMAIN_CPU);
}

static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!i915_vma_is_ggtt(vma))
			continue;

		if (i915_vma_is_active(vma))
			continue;

		if (!drm_mm_node_allocated(&vma->node))
			continue;

		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	}
}

/**
 * Moves a single object to the GTT read, and possibly write domain.
 * @obj: object to act on
 * @write: ask for write access or read only
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
		return 0;

	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_flush_cpu_write_domain(obj);

	/* Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * GTT domain upon first access.
	 */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		mb();

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
		obj->dirty = 1;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	/* And bump the LRU for this access */
	i915_gem_object_bump_inactive_ggtt(obj);

	return 0;
}

/**
 * Changes the cache-level of an object across all VMA.
 * @obj: object to act on
 * @cache_level: new cache level to set for the object
 *
 * After this function returns, the object will be in the new cache-level
 * across all GTT and the contents of the backing storage will be coherent,
 * with respect to the new cache-level. In order to keep the backing storage
 * coherent for all users, we only allow a single cache level to be set
 * globally on the object and prevent it from being changed whilst the
 * hardware is reading from the object. That is if the object is currently
 * on the scanout it will be set to uncached (or equivalent display
 * cache coherency) and all non-MOCS GPU access will also be uncached so
 * that all direct access to the scanout remains coherent.
 */
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level)
{
	struct i915_vma *vma;
	int ret = 0;

	if (obj->cache_level == cache_level)
		goto out;

	/* Inspect the list of currently bound VMA and unbind any that would
	 * be invalid given the new cache-level. This is principally to
	 * catch the issue of the CS prefetch crossing page boundaries and
	 * reading an invalid PTE on older architectures.
	 */
restart:
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_pinned(vma)) {
			DRM_DEBUG("can not change the cache level of pinned objects\n");
			return -EBUSY;
		}

		if (i915_gem_valid_gtt_space(vma, cache_level))
			continue;

		ret = i915_vma_unbind(vma);
		if (ret)
			return ret;

		/* As unbinding may affect other elements in the
		 * obj->vma_list (due to side-effects from retiring
		 * an active vma), play safe and restart the iterator.
		 */
		goto restart;
	}

	/* We can reuse the existing drm_mm nodes but need to change the
	 * cache-level on the PTE. We could simply unbind them all and
	 * rebind with the correct cache-level on next use. However since
	 * we already have a valid slot, dma mapping, pages etc, we may as
	 * well rewrite the PTE in the belief that doing so tramples upon less
	 * state and so involves less work.
	 */
	if (obj->bind_count) {
		/* Before we change the PTE, the GPU must not be accessing it.
		 * If we wait upon the object, we know that all the bound
		 * VMA are no longer active.
		 */
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;

		if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
			/* Access to snoopable pages through the GTT is
			 * incoherent and on some machines causes a hard
			 * lockup. Relinquish the CPU mmapping to force
			 * userspace to refault in the pages and we can
			 * then double check if the GTT mapping is still
			 * valid for that pointer access.
			 */
			i915_gem_release_mmap(obj);

			/* As we no longer need a fence for GTT access,
			 * we can relinquish it now (and so prevent having
			 * to steal a fence from someone else on the next
			 * fence request). Note GPU activity would have
			 * dropped the fence as all snoopable access is
			 * supposed to be linear.
			 */
			list_for_each_entry(vma, &obj->vma_list, obj_link) {
				ret = i915_vma_put_fence(vma);
				if (ret)
					return ret;
			}
		} else {
			/* We either have incoherent backing store and
			 * so no GTT access or the architecture is fully
			 * coherent. In such cases, existing GTT mmaps
			 * ignore the cache bit in the PTE and we can
			 * rewrite it without confusing the GPU or having
			 * to force userspace to fault back in its mmaps.
			 */
		}

		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
			if (ret)
				return ret;
		}
	}

	list_for_each_entry(vma, &obj->vma_list, obj_link)
		vma->node.color = cache_level;
	obj->cache_level = cache_level;

out:
	/* Flush the dirty CPU caches to the backing storage so that the
	 * object is now coherent at its new cache level (with respect
	 * to the access domain).
	 */
	if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
		if (i915_gem_clflush_object(obj, true))
			i915_gem_chipset_flush(to_i915(obj->base.dev));
	}

	return 0;
}

int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	switch (obj->cache_level) {
	case I915_CACHE_LLC:
	case I915_CACHE_L3_LLC:
		args->caching = I915_CACHING_CACHED;
		break;

	case I915_CACHE_WT:
		args->caching = I915_CACHING_DISPLAY;
		break;

	default:
		args->caching = I915_CACHING_NONE;
		break;
	}

	i915_gem_object_put_unlocked(obj);
	return 0;
}

int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret;

	switch (args->caching) {
	case I915_CACHING_NONE:
		level = I915_CACHE_NONE;
		break;
	case I915_CACHING_CACHED:
		/*
		 * Due to a HW issue on BXT A stepping, GPU stores via a
		 * snooped mapping may leave stale data in a corresponding CPU
		 * cacheline, whereas normally such cachelines would get
		 * invalidated.
		 */
		if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
			return -ENODEV;

		level = I915_CACHE_LLC;
		break;
	case I915_CACHING_DISPLAY:
		level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
		break;
	default:
		return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = i915_gem_object_set_cache_level(obj, level);
	i915_gem_object_put(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/*
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
 */
struct i915_vma *
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	u32 old_read_domains, old_write_domain;
	int ret;

	/* Mark the pin_display early so that we account for the
	 * display coherency whilst setting up the cache domains.
	 */
	obj->pin_display++;

	/* The display engine is not coherent with the LLC cache on gen6.  As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is lowest common denominator for all
	 * chipsets.
	 *
	 * However for gen6+, we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
	ret = i915_gem_object_set_cache_level(obj,
					      HAS_WT(to_i915(obj->base.dev)) ?
					      I915_CACHE_WT : I915_CACHE_NONE);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err_unpin_display;
	}

	/* As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
	 * always use map_and_fenceable for all scanout buffers. However,
	 * it may simply be too big to fit into mappable, in which case
	 * put it anyway and hope that userspace can cope (but always first
	 * try to preserve the existing ABI).
	 */
	vma = ERR_PTR(-ENOSPC);
	if (view->type == I915_GGTT_VIEW_NORMAL)
		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
					       PIN_MAPPABLE | PIN_NONBLOCK);
	if (IS_ERR(vma))
		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 0);
	if (IS_ERR(vma))
		goto err_unpin_display;

	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);

	WARN_ON(obj->pin_display > i915_vma_pin_count(vma));

	i915_gem_object_flush_cpu_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	obj->base.write_domain = 0;
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return vma;

err_unpin_display:
	obj->pin_display--;
	return vma;
}

void
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{
	if (WARN_ON(vma->obj->pin_display == 0))
		return;

	if (--vma->obj->pin_display == 0)
		vma->display_alignment = 0;

	/* Bump the LRU to try and avoid premature eviction whilst flipping */
	if (!i915_vma_is_active(vma))
		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);

	i915_vma_unpin(vma);
	WARN_ON(vma->obj->pin_display > i915_vma_pin_count(vma));
}
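
/* A minimal sketch of the intended pairing (illustrative only; assumes the
 * caller holds struct_mutex, as a modesetting path would):
 *
 *	vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	... scan out from the pinned vma ...
 *	i915_gem_object_unpin_from_display_plane(vma);
 */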

/**
 * Moves a single object to the CPU read, and possibly write domain.
 * @obj: object to act on
 * @write: requesting write or read-only access
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return 0;

	i915_gem_object_flush_gtt_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj, false);

		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}
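
/* For example, a sketch of making an object coherent for CPU writes before
 * filling it from the kernel (assumes struct_mutex is held and the caller
 * subsequently maps or copies into the object):
 *
 *	ret = i915_gem_object_set_to_cpu_domain(obj, true);
 *	if (ret)
 *		return ret;
 */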

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
	struct drm_i915_gem_request *request, *target = NULL;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	/* ABI: return -EIO if already wedged */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return -EIO;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		/*
		 * Note that the request might not have been submitted yet.
		 * In which case emitted_jiffies will be zero.
		 */
		if (!request->emitted_jiffies)
			continue;

		target = request;
	}
	if (target)
		i915_gem_request_get(target);
	spin_unlock(&file_priv->mm.lock);

	if (target == NULL)
		return 0;

	ret = i915_wait_request(target, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
	i915_gem_request_put(target);

	return ret;
}

static bool
i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	if (alignment && vma->node.start & (alignment - 1))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}
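
/* Worked example of the flag encoding checked above (illustrative): a caller
 * requesting a fixed placement passes the desired offset in the low bits of
 * flags,
 *
 *	flags = PIN_GLOBAL | PIN_OFFSET_FIXED | (offset & PIN_OFFSET_MASK);
 *
 * and a vma currently bound at any other node.start is then reported as
 * misplaced, forcing a rebind.
 */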

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	bool mappable, fenceable;
	u32 fence_size, fence_alignment;

	fence_size = i915_gem_get_ggtt_size(dev_priv,
					    vma->size,
					    i915_gem_object_get_tiling(obj));
	fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
						      vma->size,
						      i915_gem_object_get_tiling(obj),
						      true);

	fenceable = (vma->node.size == fence_size &&
		     (vma->node.start & (fence_alignment - 1)) == 0);

	mappable = (vma->node.start + fence_size <=
		    dev_priv->ggtt.mappable_end);

	if (mappable && fenceable)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	unsigned int bound = vma->flags;
	int ret;

	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err;
	}

	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
	if (ret)
		goto err;

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err:
	__i915_vma_unpin(vma);
	return ret;
}

struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_address_space *vm = &dev_priv->ggtt.base;
	struct i915_vma *vma;
	int ret;

	vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
	if (IS_ERR(vma))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK &&
		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
			return ERR_PTR(-ENOSPC);

		if (flags & PIN_MAPPABLE) {
			u32 fence_size;

			fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size,
							    i915_gem_object_get_tiling(obj));
			/* If the required space is larger than the available
			 * aperture, we will not be able to find a slot for the
			 * object and unbinding the object now will be in
			 * vain. Worse, doing so may cause us to ping-pong
			 * the object in and out of the Global GTT and
			 * waste a lot of cycles under the mutex.
			 */
			if (fence_size > dev_priv->ggtt.mappable_end)
				return ERR_PTR(-E2BIG);

			/* If NONBLOCK is set the caller is optimistically
			 * trying to cache the full object within the mappable
			 * aperture, and *must* have a fallback in place for
			 * situations where we cannot bind the object. We
			 * can be a little more lax here and use the fallback
			 * more often to avoid costly migrations of ourselves
			 * and other objects within the aperture.
			 *
			 * Half-the-aperture is used as a simple heuristic.
			 * More interesting would be to do a search for a free
			 * block prior to making the commitment to unbind.
			 * That caters for the self-harm case, and with a
			 * little more heuristics (e.g. NOFAULT, NOEVICT)
			 * we could try to minimise harm to others.
			 */
			if (flags & PIN_NONBLOCK &&
			    fence_size > dev_priv->ggtt.mappable_end / 2)
				return ERR_PTR(-ENOSPC);
		}

		WARN(i915_vma_is_pinned(vma),
		     "bo is already pinned in ggtt with incorrect alignment:"
		     " offset=%08x, req.alignment=%llx,"
		     " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
		     i915_ggtt_offset(vma), alignment,
		     !!(flags & PIN_MAPPABLE),
		     i915_vma_is_map_and_fenceable(vma));
		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);

	return vma;
}
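
/* A minimal usage sketch (assuming struct_mutex is held and the default,
 * NULL view is acceptable to the caller):
 *
 *	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	... access via i915_ggtt_offset(vma) ...
 *	i915_vma_unpin(vma);
 */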

static __always_inline unsigned int __busy_read_flag(unsigned int id)
{
	/* Note that we could alias engines in the execbuf API, but
	 * that would be very unwise as it prevents userspace from
	 * fine control over engine selection. Ahem.
	 *
	 * This should be something like EXEC_MAX_ENGINE instead of
	 * I915_NUM_ENGINES.
	 */
	BUILD_BUG_ON(I915_NUM_ENGINES > 16);
	return 0x10000 << id;
}

static __always_inline unsigned int __busy_write_id(unsigned int id)
{
	/* The uABI guarantees an active writer is also amongst the read
	 * engines. This would be true if we accessed the activity tracking
	 * under the lock, but as we perform the lookup of the object and
	 * its activity locklessly we can not guarantee that the last_write
	 * being active implies that we have set the same engine flag from
	 * last_read - hence we always set both read and write busy for
	 * last_write.
	 */
	return id | __busy_read_flag(id);
}
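
/* Worked example of the encoding above: for engine id 2 the read flag is
 * 0x10000 << 2 == 0x40000, and a writer on that engine reports
 * __busy_write_id(2) == 2 | 0x40000 == 0x40002, i.e. the write id in the
 * low half-word and the matching read flag in the high word.
 */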

static __always_inline unsigned int
__busy_set_if_active(const struct i915_gem_active *active,
		     unsigned int (*flag)(unsigned int id))
{
	struct drm_i915_gem_request *request;

	request = rcu_dereference(active->request);
	if (!request || i915_gem_request_completed(request))
		return 0;

	/* This is racy. See __i915_gem_active_get_rcu() for a detailed
	 * discussion of how to handle the race correctly, but for reporting
	 * the busy state we err on the side of potentially reporting the
	 * wrong engine as being busy (but we guarantee that the result
	 * is at least self-consistent).
	 *
	 * As we use SLAB_DESTROY_BY_RCU, the request may be reallocated
	 * whilst we are inspecting it, even under the RCU read lock as we are.
	 * This means that there is a small window for the engine and/or the
	 * seqno to have been overwritten. The seqno will always be in the
	 * future compared to the intended, and so we know that if that
	 * seqno is idle (on whatever engine) our request is idle and the
	 * return 0 above is correct.
	 *
	 * The issue is that if the engine is switched, it is just as likely
	 * to report that it is busy (but since the switch happened, we know
	 * the request should be idle). So there is a small chance that a busy
	 * result is actually the wrong engine.
	 *
	 * So why don't we care?
	 *
	 * For starters, the busy ioctl is a heuristic that is by definition
	 * racy. Even with perfect serialisation in the driver, the hardware
	 * state is constantly advancing - the state we report to the user
	 * is stale.
	 *
	 * The critical information for the busy-ioctl is whether the object
	 * is idle as userspace relies on that to detect whether its next
	 * access will stall, or if it has missed submitting commands to
	 * the hardware allowing the GPU to stall. We never generate a
	 * false-positive for idleness, thus busy-ioctl is reliable at the
	 * most fundamental level, and we maintain the guarantee that a
	 * busy object left to itself will eventually become idle (and stay
	 * idle!).
	 *
	 * We allow ourselves the leeway of potentially misreporting the busy
	 * state because that is an optimisation heuristic that is constantly
	 * in flux. Being quickly able to detect the busy/idle state is much
	 * more important than accurate logging of exactly which engines were
	 * busy.
	 *
	 * For accuracy in reporting the engine, we could use
	 *
	 *	result = 0;
	 *	request = __i915_gem_active_get_rcu(active);
	 *	if (request) {
	 *		if (!i915_gem_request_completed(request))
	 *			result = flag(request->engine->exec_id);
	 *		i915_gem_request_put(request);
	 *	}
	 *
	 * but that still remains susceptible to both hardware and userspace
	 * races. So we accept making the result of that race slightly worse,
	 * given the rarity of the race and its low impact on the result.
	 */
	return flag(READ_ONCE(request->engine->exec_id));
}

static __always_inline unsigned int
busy_check_reader(const struct i915_gem_active *active)
{
	return __busy_set_if_active(active, __busy_read_flag);
}

static __always_inline unsigned int
busy_check_writer(const struct i915_gem_active *active)
{
	return __busy_set_if_active(active, __busy_write_id);
}

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long active;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	args->busy = 0;
	active = __I915_BO_ACTIVE(obj);
	if (active) {
		int idx;

		/* Yes, the lookups are intentionally racy.
		 *
		 * First, we cannot simply rely on __I915_BO_ACTIVE. We have
		 * to regard the value as stale and as our ABI guarantees
		 * forward progress, we confirm the status of each active
		 * request with the hardware.
		 *
		 * Even though we guard the pointer lookup by RCU, that only
		 * guarantees that the pointer and its contents remain
		 * dereferenceable and does *not* mean that the request we
		 * have is the same as the one being tracked by the object.
		 *
		 * Consider that we lookup the request just as it is being
		 * retired and freed. We take a local copy of the pointer,
		 * but before we add its engine into the busy set, the other
		 * thread reallocates it and assigns it to a task on another
		 * engine with a fresh and incomplete seqno. Guarding against
		 * that requires careful serialisation and reference counting,
		 * i.e. using __i915_gem_active_get_request_rcu(). We don't,
		 * instead we expect that if the result is busy, which engines
		 * are busy is not completely reliable - we only guarantee
		 * that the object was busy.
		 */
		rcu_read_lock();

		for_each_active(active, idx)
			args->busy |= busy_check_reader(&obj->last_read[idx]);

		/* For ABI sanity, we only care that the write engine is in
		 * the set of read engines. This should be ensured by the
		 * ordering of setting last_read/last_write in
		 * i915_vma_move_to_active(), and then in reverse in retire.
		 * However, for good measure, we always report the last_write
		 * request as a busy read as well as being a busy write.
		 *
		 * We don't care that the set of active read/write engines
		 * may change during construction of the result, as it is
		 * equally liable to change before userspace can inspect
		 * the result.
		 */
		args->busy |= busy_check_writer(&obj->last_write);

		rcu_read_unlock();
	}

	i915_gem_object_put_unlocked(obj);
	return 0;
}
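
/* Illustrative decode on the other side of the uABI, mirroring
 * __busy_read_flag()/__busy_write_id() above (a sketch, not part of the
 * driver):
 *
 *	struct drm_i915_gem_busy args = { .handle = handle };
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &args) == 0) {
 *		uint16_t write_id = args.busy & 0xffff;
 *		uint16_t read_mask = args.busy >> 16;
 *		...
 *	}
 */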

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
	    break;
	default:
	    return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->pages &&
	    i915_gem_object_is_tiled(obj) &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->madv == I915_MADV_WILLNEED)
			i915_gem_object_unpin_pages(obj);
		if (args->madv == I915_MADV_WILLNEED)
			i915_gem_object_pin_pages(obj);
	}

	if (obj->madv != __I915_MADV_PURGED)
		obj->madv = args->madv;

	/* if the object is no longer attached, discard its backing storage */
	if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj->madv != __I915_MADV_PURGED;

	i915_gem_object_put(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
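
/* An illustrative (hypothetical) userspace use of the ioctl above, restoring
 * a previously purgeable buffer and checking whether its contents survived:
 *
 *	struct drm_i915_gem_madvise arg = {
 *		.handle = handle,
 *		.madv = I915_MADV_WILLNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
 *	if (!arg.retained)
 *		... backing storage was purged, reupload contents ...
 */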

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
{
	int i;

	INIT_LIST_HEAD(&obj->global_list);
	INIT_LIST_HEAD(&obj->userfault_link);
	for (i = 0; i < I915_NUM_ENGINES; i++)
		init_request_active(&obj->last_read[i],
				    i915_gem_object_retire__read);
	init_request_active(&obj->last_write,
			    i915_gem_object_retire__write);
	INIT_LIST_HEAD(&obj->obj_exec_link);
	INIT_LIST_HEAD(&obj->vma_list);
	INIT_LIST_HEAD(&obj->batch_pool_link);

	obj->ops = ops;

	obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
	obj->madv = I915_MADV_WILLNEED;

	i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}

static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,
};

/* Note we don't consider signbits :| */
#define overflows_type(x, T) \
	(sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
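
/* Worked example: overflows_type(1ull << 40, u32) is true (an 8 byte value
 * with bits set above the 32nd), while overflows_type(1ull << 20, u32) is
 * false since the shifted-down remainder is zero.
 */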

struct drm_i915_gem_object *
i915_gem_object_create(struct drm_device *dev, u64 size)
{
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	gfp_t mask;
	int ret;

	/* There is a prevalence of the assumption that we fit the object's
	 * page count inside a 32bit _signed_ variable. Let's document this and
	 * catch if we ever need to fix it. In the meantime, if you do spot
	 * such a local variable, please consider fixing!
	 */
	if (WARN_ON(size >> PAGE_SHIFT > INT_MAX))
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &obj->base, size);
	if (ret)
		goto fail;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);

	i915_gem_object_init(obj, &i915_gem_object_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(dev)) {
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached.  Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache.  This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		obj->cache_level = I915_CACHE_LLC;
	} else
		obj->cache_level = I915_CACHE_NONE;

	trace_i915_gem_object_create(obj);

	return obj;

fail:
	i915_gem_object_free(obj);

	return ERR_PTR(ret);
}

static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
	/* If we are the last user of the backing storage (be it shmemfs
	 * pages or stolen etc), we know that the pages are going to be
	 * immediately released. In this case, we can then skip copying
	 * back the contents from the GPU.
	 */

	if (obj->madv != I915_MADV_WILLNEED)
		return false;

	if (obj->base.filp == NULL)
		return true;

	/* At first glance, this looks racy, but then again so would be
	 * userspace racing mmap against close. However, the first external
	 * reference to the filp can only be obtained through the
	 * i915_gem_mmap_ioctl() which safeguards us against the user
	 * acquiring such a reference whilst we are in the middle of
	 * freeing the object.
	 */
	return atomic_long_read(&obj->base.filp->f_count) == 1;
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_vma *vma, *next;

	intel_runtime_pm_get(dev_priv);

	trace_i915_gem_object_destroy(obj);

	/* All file-owned VMA should have been released by this point through
	 * i915_gem_close_object(), or earlier by i915_gem_context_close().
	 * However, the object may also be bound into the global GTT (e.g.
	 * older GPUs without per-process support, or for direct access through
	 * the GTT either for the user or for scanout). Those VMA still need to
	 * be unbound now.
	 */
	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
		GEM_BUG_ON(!i915_vma_is_ggtt(vma));
		GEM_BUG_ON(i915_vma_is_active(vma));
		vma->flags &= ~I915_VMA_PIN_MASK;
		i915_vma_close(vma);
	}
	GEM_BUG_ON(obj->bind_count);

	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
	 * before progressing. */
	if (obj->stolen)
		i915_gem_object_unpin_pages(obj);

	WARN_ON(atomic_read(&obj->frontbuffer_bits));

	if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
	    i915_gem_object_is_tiled(obj))
		i915_gem_object_unpin_pages(obj);

	if (WARN_ON(obj->pages_pin_count))
		obj->pages_pin_count = 0;
	if (discard_backing_storage(obj))
		obj->madv = I915_MADV_DONTNEED;
	i915_gem_object_put_pages(obj);

	BUG_ON(obj->pages);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	if (obj->ops->release)
		obj->ops->release(obj);

	drm_gem_object_release(&obj->base);
	i915_gem_info_remove_obj(dev_priv, obj->base.size);

	kfree(obj->bit_17);
	i915_gem_object_free(obj);

	intel_runtime_pm_put(dev_priv);
}

int i915_gem_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	intel_suspend_gt_powersave(dev_priv);

	mutex_lock(&dev->struct_mutex);

	/* We have to flush all the executing contexts to main memory so
	 * that they can be saved in the hibernation image. To ensure the
	 * last context image is coherent, we have to switch away from it.
	 * That leaves the dev_priv->kernel_context still active when
	 * we actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	ret = i915_gem_switch_to_kernel_context(dev_priv);
	if (ret)
		goto err;

	ret = i915_gem_wait_for_idle(dev_priv,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
	if (ret)
		goto err;

	i915_gem_retire_requests(dev_priv);

	i915_gem_context_lost(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	cancel_delayed_work_sync(&dev_priv->gt.retire_work);
	flush_delayed_work(&dev_priv->gt.idle_work);

	/* Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
	WARN_ON(dev_priv->gt.awake);

	/*
	 * Neither the BIOS, ourselves or any other kernel
	 * expects the system to be in execlists mode on startup,
	 * so we need to reset the GPU back to legacy mode. And the only
	 * known way to disable logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload and suspend. Afterwards we then
	 * clean up the GEM state tracking, flushing off the requests and
	 * leaving the system in a known idle state.
	 *
	 * Note that it is of the utmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */
	if (HAS_HW_CONTEXTS(dev)) {
		int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
		WARN_ON(reset && reset != -ENODEV);
	}

	return 0;

err:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

void i915_gem_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	mutex_lock(&dev->struct_mutex);
	i915_gem_restore_gtt_mappings(dev);

	/* As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
	dev_priv->gt.resume(dev_priv);

	mutex_unlock(&dev->struct_mutex);
}

void i915_gem_init_swizzling(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (INTEL_INFO(dev)->gen < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN5(dev_priv))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN6(dev_priv))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN7(dev_priv))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else if (IS_GEN8(dev_priv))
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
	else
		BUG();
}

static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
{
	I915_WRITE(RING_CTL(base), 0);
	I915_WRITE(RING_HEAD(base), 0);
	I915_WRITE(RING_TAIL(base), 0);
	I915_WRITE(RING_START(base), 0);
}

static void init_unused_rings(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
		init_unused_ring(dev_priv, SRB2_BASE);
		init_unused_ring(dev_priv, SRB3_BASE);
	} else if (IS_GEN2(dev_priv)) {
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
	} else if (IS_GEN3(dev_priv)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, PRB2_BASE);
	}
}

int
i915_gem_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (IS_HASWELL(dev_priv))
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	if (HAS_PCH_NOP(dev_priv)) {
		if (IS_IVYBRIDGE(dev_priv)) {
			u32 temp = I915_READ(GEN7_MSG_CTL);
			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
			I915_WRITE(GEN7_MSG_CTL, temp);
		} else if (INTEL_INFO(dev)->gen >= 7) {
			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
		}
	}

	i915_gem_init_swizzling(dev);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(dev_priv);

	BUG_ON(!dev_priv->kernel_context);

	ret = i915_ppgtt_init_hw(dev);
	if (ret) {
		DRM_ERROR("PPGTT enable HW failed %d\n", ret);
		goto out;
	}

	/* Need to do basic initialisation of all rings first: */
	for_each_engine(engine, dev_priv, id) {
		ret = engine->init_hw(engine);
		if (ret)
			goto out;
	}

	intel_mocs_init_l3cc_table(dev);

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_guc_setup(dev);
	if (ret)
		goto out;

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	return ret;
}

bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
{
	if (INTEL_INFO(dev_priv)->gen < 6)
		return false;

	/* TODO: make semaphores and Execlists play nicely together */
	if (i915.enable_execlists)
		return false;

	if (value >= 0)
		return value;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}
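
/* A sketch of the intended use at driver load (assuming the i915.semaphores
 * module parameter is the value being sanitized):
 *
 *	i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
 */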

int i915_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	mutex_lock(&dev->struct_mutex);
	spin_lock_init(&dev_priv->mm.userfault_lock);

	if (!i915.enable_execlists) {
		dev_priv->gt.resume = intel_legacy_submission_resume;
		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
	} else {
		dev_priv->gt.resume = intel_lr_context_resume;
		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
	}

	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	i915_gem_init_userptr(dev_priv);

	ret = i915_gem_init_ggtt(dev_priv);
	if (ret)
		goto out_unlock;

	ret = i915_gem_context_init(dev);
	if (ret)
		goto out_unlock;

	ret = intel_engines_init(dev);
	if (ret)
		goto out_unlock;

	ret = i915_gem_init_hw(dev);
	if (ret == -EIO) {
		/* Allow engine initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this where the GPU is angry,
		 * for all other failure, such as an allocation failure, bail.
		 */
		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
		i915_gem_set_wedged(dev_priv);
		ret = 0;
	}

out_unlock:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

void
i915_gem_cleanup_engines(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		dev_priv->gt.cleanup_engine(engine);
}

void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int i;

	if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
		 IS_I945GM(dev_priv) || IS_G33(dev_priv))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	if (intel_vgpu_active(dev_priv))
		dev_priv->num_fence_regs =
				I915_READ(vgtif_reg(avail_rs.fence_num));

	/* Initialize fence registers to zero */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];

		fence->i915 = dev_priv;
		fence->id = i;
		list_add_tail(&fence->link, &dev_priv->mm.fence_list);
	}
	i915_gem_restore_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
}

void
i915_gem_load_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->objects =
		kmem_cache_create("i915_gem_object",
				  sizeof(struct drm_i915_gem_object), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	dev_priv->vmas =
		kmem_cache_create("i915_gem_vma",
				  sizeof(struct i915_vma), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	dev_priv->requests =
		kmem_cache_create("i915_gem_request",
				  sizeof(struct drm_i915_gem_request), 0,
				  SLAB_HWCACHE_ALIGN |
				  SLAB_RECLAIM_ACCOUNT |
				  SLAB_DESTROY_BY_RCU,
				  NULL);

	INIT_LIST_HEAD(&dev_priv->context_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
	INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
			  i915_gem_retire_work_handler);
	INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
			  i915_gem_idle_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);

	spin_lock_init(&dev_priv->fb_tracking.lock);
}

void i915_gem_load_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	kmem_cache_destroy(dev_priv->requests);
	kmem_cache_destroy(dev_priv->vmas);
	kmem_cache_destroy(dev_priv->objects);

	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
	rcu_barrier();
}

int i915_gem_freeze(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_gem_shrink_all(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_runtime_pm_put(dev_priv);

	return 0;
}

int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct list_head *phases[] = {
		&dev_priv->mm.unbound_list,
		&dev_priv->mm.bound_list,
		NULL
	}, **p;

	/* Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
	 *
	 * To try and reduce the hibernation image, we manually shrink
	 * the objects as well.
	 */

	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);

	for (p = phases; *p; p++) {
		list_for_each_entry(obj, *p, global_list) {
			obj->base.read_domains = I915_GEM_DOMAIN_CPU;
			obj->base.write_domain = I915_GEM_DOMAIN_CPU;
		}
	}
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_request *request;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list)
		request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);

	if (!list_empty(&file_priv->rps.link)) {
		spin_lock(&to_i915(dev)->rps.client_lock);
		list_del(&file_priv->rps.link);
		spin_unlock(&to_i915(dev)->rps.client_lock);
	}
}

int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = to_i915(dev);
	file_priv->file = file;
	INIT_LIST_HEAD(&file_priv->rps.link);

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	file_priv->bsd_engine = -1;

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	/* Control of individual bits within the mask are guarded by
	 * the owning plane->mutex, i.e. we can never see concurrent
	 * manipulation of individual bits. But since the bitfield as a whole
	 * is updated using RMW, we need to use atomics in order to update
	 * the bits.
	 */
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
		     sizeof(atomic_t) * BITS_PER_BYTE);

	if (old) {
		WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
		atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
	}

	if (new) {
		WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
		atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
	}
}
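
/* An illustrative call from a flip path, handing the primary plane's slot on
 * a pipe over from the outgoing to the incoming framebuffer object (a sketch;
 * the INTEL_FRONTBUFFER_PRIMARY() bit is assumed to match the plane being
 * flipped):
 *
 *	i915_gem_track_fb(old_obj, new_obj,
 *			  INTEL_FRONTBUFFER_PRIMARY(pipe));
 */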

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
{
	struct page *page;

	/* Only default objects have per-page dirty tracking */
	if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
		return NULL;

	page = i915_gem_object_get_page(obj, n);
	set_page_dirty(page);
	return page;
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
			         const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct sg_table *sg;
	size_t bytes;
	int ret;

	obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		goto fail;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto fail;

	i915_gem_object_pin_pages(obj);
	sg = obj->pages;
	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
	obj->dirty = 1;		/* Backing store is now out of date */
	i915_gem_object_unpin_pages(obj);

	if (WARN_ON(bytes != size)) {
		DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
		ret = -EFAULT;
		goto fail;
	}

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(ret);
}