i915_gem.c 126.1 KB
Newer Older
1
/*
2
 * Copyright © 2008-2015 Intel Corporation
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

28
#include <drm/drmP.h>
29
#include <drm/drm_vma_manager.h>
30
#include <drm/i915_drm.h>
31
#include "i915_drv.h"
32
#include "i915_gem_dmabuf.h"
33
#include "i915_vgpu.h"
C
Chris Wilson 已提交
34
#include "i915_trace.h"
35
#include "intel_drv.h"
36
#include "intel_frontbuffer.h"
37
#include "intel_mocs.h"
38
#include <linux/reservation.h>
39
#include <linux/shmem_fs.h>
40
#include <linux/slab.h>
41
#include <linux/swap.h>
J
Jesse Barnes 已提交
42
#include <linux/pci.h>
43
#include <linux/dma-buf.h>
44

45
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
46
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
47

48 49 50 51 52 53
static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

54 55
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
56 57 58
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

59 60 61 62 63 64
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82
static int
insert_mappable_node(struct drm_i915_private *i915,
                     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
						   size, 0, 0, 0,
						   i915->ggtt.mappable_end,
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_DEFAULT);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

83 84 85 86
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
87
	spin_lock(&dev_priv->mm.object_stat_lock);
88 89
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
90
	spin_unlock(&dev_priv->mm.object_stat_lock);
91 92 93 94 95
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
96
	spin_lock(&dev_priv->mm.object_stat_lock);
97 98
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
99
	spin_unlock(&dev_priv->mm.object_stat_lock);
100 101
}

102
static int
103
i915_gem_wait_for_error(struct i915_gpu_error *error)
104 105 106
{
	int ret;

107
	if (!i915_reset_in_progress(error))
108 109
		return 0;

110 111 112 113 114
	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
115
	ret = wait_event_interruptible_timeout(error->reset_queue,
116
					       !i915_reset_in_progress(error),
117
					       10*HZ);
118 119 120 121
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
122
		return ret;
123 124
	} else {
		return 0;
125
	}
126 127
}

128
int i915_mutex_lock_interruptible(struct drm_device *dev)
129
{
130
	struct drm_i915_private *dev_priv = to_i915(dev);
131 132
	int ret;

133
	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
134 135 136 137 138 139 140 141 142
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	return 0;
}
143

144 145
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
146
			    struct drm_file *file)
147
{
148
	struct drm_i915_private *dev_priv = to_i915(dev);
149
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
150
	struct drm_i915_gem_get_aperture *args = data;
151
	struct i915_vma *vma;
152
	size_t pinned;
153

154
	pinned = 0;
155
	mutex_lock(&dev->struct_mutex);
156
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
157
		if (i915_vma_is_pinned(vma))
158
			pinned += vma->node.size;
159
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
160
		if (i915_vma_is_pinned(vma))
161
			pinned += vma->node.size;
162
	mutex_unlock(&dev->struct_mutex);
163

164
	args->aper_size = ggtt->base.total;
165
	args->aper_available_size = args->aper_size - pinned;
166

167 168 169
	return 0;
}

170 171
static int
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
172
{
173
	struct address_space *mapping = obj->base.filp->f_mapping;
174 175 176 177
	char *vaddr = obj->phys_handle->vaddr;
	struct sg_table *st;
	struct scatterlist *sg;
	int i;
178

179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194
	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

195
		put_page(page);
196 197 198
		vaddr += PAGE_SIZE;
	}

199
	i915_gem_chipset_flush(to_i915(obj->base.dev));
200 201 202 203 204 205 206 207 208 209 210 211 212

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;
213

214 215 216 217 218 219 220 221 222 223 224 225 226
	sg_dma_address(sg) = obj->phys_handle->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->pages = st;
	return 0;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
{
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);
227

228
	ret = i915_gem_object_set_to_cpu_domain(obj, true);
229
	if (WARN_ON(ret)) {
230 231 232 233 234 235 236 237 238 239
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	if (obj->dirty) {
240
		struct address_space *mapping = obj->base.filp->f_mapping;
241
		char *vaddr = obj->phys_handle->vaddr;
242 243 244
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
245 246 247 248 249 250 251 252 253 254 255 256 257 258
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->madv == I915_MADV_WILLNEED)
259
				mark_page_accessed(page);
260
			put_page(page);
261 262
			vaddr += PAGE_SIZE;
		}
263
		obj->dirty = 0;
264 265
	}

266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281
	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

282
int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
283 284 285
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
286 287 288
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);
289

290 291 292 293
	/* Closed vma are removed from the obj->vma_list - but they may
	 * still have an active binding on the object. To remove those we
	 * must wait for all rendering to complete to the object (as unbinding
	 * must anyway), and retire the requests.
294
	 */
295 296 297 298 299 300
	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	i915_gem_retire_requests(to_i915(obj->base.dev));

301 302 303 304 305 306 307 308 309 310 311 312 313
	while ((vma = list_first_entry_or_null(&obj->vma_list,
					       struct i915_vma,
					       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
		if (ret)
			break;
	}
	list_splice(&still_in_list, &obj->vma_list);

	return ret;
}

314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 * @obj: i915 gem object
 * @readonly: waiting for just read access or read-write access
 */
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct reservation_object *resv;
	struct i915_gem_active *active;
	unsigned long active_mask;
	int idx;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (!readonly) {
		active = obj->last_read;
		active_mask = i915_gem_object_get_active(obj);
	} else {
		active_mask = 1;
		active = &obj->last_write;
	}

	for_each_active(active_mask, idx) {
		int ret;

		ret = i915_gem_active_wait(&active[idx],
					   &obj->base.dev->struct_mutex);
		if (ret)
			return ret;
	}

	resv = i915_gem_object_get_dmabuf_resv(obj);
	if (resv) {
		long err;

		err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
							  MAX_SCHEDULE_TIMEOUT);
		if (err < 0)
			return err;
	}

	return 0;
}

361 362 363
/* A nonblocking variant of the above wait. Must be called prior to
 * acquiring the mutex for the object, as the object state may change
 * during this call. A reference must be held by the caller for the object.
364 365
 */
static __must_check int
366 367 368
__unsafe_wait_rendering(struct drm_i915_gem_object *obj,
			struct intel_rps_client *rps,
			bool readonly)
369 370 371
{
	struct i915_gem_active *active;
	unsigned long active_mask;
372
	int idx;
373

374
	active_mask = __I915_BO_ACTIVE(obj);
375 376 377 378 379 380 381 382 383 384
	if (!active_mask)
		return 0;

	if (!readonly) {
		active = obj->last_read;
	} else {
		active_mask = 1;
		active = &obj->last_write;
	}

385 386
	for_each_active(active_mask, idx) {
		int ret;
387

388
		ret = i915_gem_active_wait_unlocked(&active[idx],
389 390
						    I915_WAIT_INTERRUPTIBLE,
						    NULL, rps);
391 392
		if (ret)
			return ret;
393 394
	}

395
	return 0;
396 397 398 399 400 401 402 403 404
}

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;

	return &fpriv->rps;
}

405 406 407 408 409
int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
			    int align)
{
	drm_dma_handle_t *phys;
410
	int ret;
411 412 413 414 415 416 417 418 419 420 421 422 423 424

	if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align -1))
			return -EBUSY;

		return 0;
	}

	if (obj->madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (obj->base.filp == NULL)
		return -EINVAL;

C
Chris Wilson 已提交
425 426 427 428 429
	ret = i915_gem_object_unbind(obj);
	if (ret)
		return ret;

	ret = i915_gem_object_put_pages(obj);
430 431 432
	if (ret)
		return ret;

433 434 435 436 437 438
	/* create a new object */
	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
	if (!phys)
		return -ENOMEM;

	obj->phys_handle = phys;
439 440 441
	obj->ops = &i915_gem_phys_ops;

	return i915_gem_object_get_pages(obj);
442 443 444 445 446 447 448 449 450
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
451
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
452
	int ret = 0;
453 454 455 456 457 458 459

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;
460

461
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
462 463 464 465 466 467 468 469 470 471
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
472 473 474 475
		if (unwritten) {
			ret = -EFAULT;
			goto out;
		}
476 477
	}

478
	drm_clflush_virt_range(vaddr, args->size);
479
	i915_gem_chipset_flush(to_i915(dev));
480 481

out:
482
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
483
	return ret;
484 485
}

486 487
void *i915_gem_object_alloc(struct drm_device *dev)
{
488
	struct drm_i915_private *dev_priv = to_i915(dev);
489
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
490 491 492 493
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
494
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
495
	kmem_cache_free(dev_priv->objects, obj);
496 497
}

498 499 500 501 502
static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
503
{
504
	struct drm_i915_gem_object *obj;
505 506
	int ret;
	u32 handle;
507

508
	size = roundup(size, PAGE_SIZE);
509 510
	if (size == 0)
		return -EINVAL;
511 512

	/* Allocate the new object */
513
	obj = i915_gem_object_create(dev, size);
514 515
	if (IS_ERR(obj))
		return PTR_ERR(obj);
516

517
	ret = drm_gem_handle_create(file, &obj->base, &handle);
518
	/* drop reference from allocate - handle holds it now */
519
	i915_gem_object_put_unlocked(obj);
520 521
	if (ret)
		return ret;
522

523
	*handle_p = handle;
524 525 526
	return 0;
}

527 528 529 530 531 532
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
533
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
534 535
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
536
			       args->size, &args->handle);
537 538 539 540
}

/**
 * Creates a new mm object and returns a handle to it.
541 542 543
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
544 545 546 547 548 549
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;
550

551
	return i915_gem_create(file, dev,
552
			       args->size, &args->handle);
553 554
}

555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

581
static inline int
582 583
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

607 608 609 610 611 612
/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
613
				    unsigned int *needs_clflush)
614 615 616 617 618
{
	int ret;

	*needs_clflush = 0;

619 620
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;
621

622 623 624 625
	ret = i915_gem_object_wait_rendering(obj, true);
	if (ret)
		return ret;

626 627 628 629 630 631
	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

632 633
	i915_gem_object_flush_gtt_write_domain(obj);

634 635 636 637 638 639
	/* If we're not in the cpu read domain, set ourself into the gtt
	 * read domain and manually flush cachelines (if required). This
	 * optimizes for the case when the gpu will dirty the data
	 * anyway again before the next pread happens.
	 */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
640 641
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);
642 643 644

	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, false);
645 646 647
		if (ret)
			goto err_unpin;

648
		*needs_clflush = 0;
649 650
	}

651
	/* return with the pages pinned */
652
	return 0;
653 654 655 656

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
657 658 659 660 661 662 663 664 665 666 667 668 669 670 671
}

int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
				     unsigned int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

672 673 674 675 676 677
	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

678 679
	i915_gem_object_flush_gtt_write_domain(obj);

680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696
	/* If we're not in the cpu write domain, set ourself into the
	 * gtt write domain and manually flush cachelines (as required).
	 * This optimizes for the case when the gpu will use the data
	 * right away and we therefore have to clflush anyway.
	 */
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		*needs_clflush |= cpu_write_needs_clflush(obj) << 1;

	/* Same trick applies to invalidate partially written cachelines read
	 * before writing.
	 */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
							 obj->cache_level);

	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, true);
697 698 699
		if (ret)
			goto err_unpin;

700 701 702 703 704 705 706 707
		*needs_clflush = 0;
	}

	if ((*needs_clflush & CLFLUSH_AFTER) == 0)
		obj->cache_dirty = true;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	obj->dirty = 1;
708
	/* return with the pages pinned */
709
	return 0;
710 711 712 713

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
714 715
}

716 717 718
/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
719
static int
720 721 722 723 724 725 726
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

727
	if (unlikely(page_do_bit17_swizzling))
728 729 730 731 732 733 734 735 736 737 738
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

739
	return ret ? -EFAULT : 0;
740 741
}

742 743 744 745
static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
746
	if (unlikely(swizzled)) {
747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}

}

764 765 766 767 768 769 770 771 772 773 774 775
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
776 777 778
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
779 780 781 782 783 784 785 786 787 788 789

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

790
	return ret ? - EFAULT : 0;
791 792
}

793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819
static inline unsigned long
slow_user_access(struct io_mapping *mapping,
		 uint64_t page_base, int page_offset,
		 char __user *user_data,
		 unsigned long length, bool pwrite)
{
	void __iomem *ioaddr;
	void *vaddr;
	uint64_t unwritten;

	ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)ioaddr + page_offset;
	if (pwrite)
		unwritten = __copy_from_user(vaddr, user_data, length);
	else
		unwritten = __copy_to_user(user_data, vaddr, length);

	io_mapping_unmap(ioaddr);
	return unwritten;
}

static int
i915_gem_gtt_pread(struct drm_device *dev,
		   struct drm_i915_gem_object *obj, uint64_t size,
		   uint64_t data_offset, uint64_t data_ptr)
{
820
	struct drm_i915_private *dev_priv = to_i915(dev);
821
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
C
Chris Wilson 已提交
822
	struct i915_vma *vma;
823 824 825 826 827 828
	struct drm_mm_node node;
	char __user *user_data;
	uint64_t remain;
	uint64_t offset;
	int ret;

C
Chris Wilson 已提交
829
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
830 831 832
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
833
		ret = i915_vma_put_fence(vma);
834 835 836 837 838
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
C
Chris Wilson 已提交
839
	if (IS_ERR(vma)) {
840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894
		ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
		if (ret)
			goto out;

		ret = i915_gem_object_get_pages(obj);
		if (ret) {
			remove_mappable_node(&node);
			goto out;
		}

		i915_gem_object_pin_pages(obj);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret)
		goto out_unpin;

	user_data = u64_to_user_ptr(data_ptr);
	remain = size;
	offset = data_offset;

	mutex_unlock(&dev->struct_mutex);
	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_writeable(user_data, remain);
		if (ret) {
			mutex_lock(&dev->struct_mutex);
			goto out_unpin;
		}
	}

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start,
					       I915_CACHE_NONE, 0);
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* This is a slow read/write as it tries to read from
		 * and write to user memory which may result into page
		 * faults, and so we cannot perform this under struct_mutex.
		 */
895
		if (slow_user_access(&ggtt->mappable, page_base,
896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926
				     page_offset, user_data,
				     page_length, false)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	mutex_lock(&dev->struct_mutex);
	if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
		/* The user has modified the object whilst we tried
		 * reading from it, and we now have no idea what domain
		 * the pages should be in. As we have just been touching
		 * them directly, flush everything back to the GTT
		 * domain.
		 */
		ret = i915_gem_object_set_to_gtt_domain(obj, false);
	}

out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size,
				       true);
		i915_gem_object_unpin_pages(obj);
		remove_mappable_node(&node);
	} else {
C
Chris Wilson 已提交
927
		i915_vma_unpin(vma);
928 929 930 931 932
	}
out:
	return ret;
}

933
static int
934 935 936 937
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
938
{
939
	char __user *user_data;
940
	ssize_t remain;
941
	loff_t offset;
942
	int shmem_page_offset, page_length, ret = 0;
943
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
944
	int prefaulted = 0;
945
	int needs_clflush = 0;
946
	struct sg_page_iter sg_iter;
947

948
	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
949 950 951
	if (ret)
		return ret;

952 953
	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
	user_data = u64_to_user_ptr(args->data_ptr);
954
	offset = args->offset;
955
	remain = args->size;
956

957 958
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
959
		struct page *page = sg_page_iter_page(&sg_iter);
960 961 962 963

		if (remain <= 0)
			break;

964 965 966 967 968
		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
969
		shmem_page_offset = offset_in_page(offset);
970 971 972 973
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

974 975 976
		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

977 978 979 980 981
		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;
982 983 984

		mutex_unlock(&dev->struct_mutex);

985
		if (likely(!i915.prefault_disable) && !prefaulted) {
986
			ret = fault_in_multipages_writeable(user_data, remain);
987 988 989 990 991 992 993
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}
994

995 996 997
		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
998

999
		mutex_lock(&dev->struct_mutex);
1000 1001

		if (ret)
1002 1003
			goto out;

1004
next_page:
1005
		remain -= page_length;
1006
		user_data += page_length;
1007 1008 1009
		offset += page_length;
	}

1010
out:
1011
	i915_gem_obj_finish_shmem_access(obj);
1012

1013 1014 1015
	return ret;
}

1016 1017
/**
 * Reads data from the object referenced by handle.
1018 1019 1020
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
1021 1022 1023 1024 1025
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1026
		     struct drm_file *file)
1027 1028
{
	struct drm_i915_gem_pread *args = data;
1029
	struct drm_i915_gem_object *obj;
1030
	int ret = 0;
1031

1032 1033 1034 1035
	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
1036
		       u64_to_user_ptr(args->data_ptr),
1037 1038 1039
		       args->size))
		return -EFAULT;

1040
	obj = i915_gem_object_lookup(file, args->handle);
1041 1042
	if (!obj)
		return -ENOENT;
1043

1044
	/* Bounds check source.  */
1045 1046
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
C
Chris Wilson 已提交
1047
		ret = -EINVAL;
1048
		goto err;
C
Chris Wilson 已提交
1049 1050
	}

C
Chris Wilson 已提交
1051 1052
	trace_i915_gem_object_pread(obj, args->offset, args->size);

1053 1054 1055 1056 1057 1058 1059 1060
	ret = __unsafe_wait_rendering(obj, to_rps_client(file), true);
	if (ret)
		goto err;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err;

1061
	ret = i915_gem_shmem_pread(dev, obj, args, file);
1062

1063
	/* pread for non shmem backed objects */
1064 1065
	if (ret == -EFAULT || ret == -ENODEV) {
		intel_runtime_pm_get(to_i915(dev));
1066 1067
		ret = i915_gem_gtt_pread(dev, obj, args->size,
					args->offset, args->data_ptr);
1068 1069
		intel_runtime_pm_put(to_i915(dev));
	}
1070

1071
	i915_gem_object_put(obj);
1072
	mutex_unlock(&dev->struct_mutex);
1073 1074 1075 1076 1077

	return ret;

err:
	i915_gem_object_put_unlocked(obj);
1078
	return ret;
1079 1080
}

1081 1082
/* This is the fast write path which cannot handle
 * page faults in the source data
1083
 */
1084 1085 1086 1087 1088 1089

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
1090
{
1091 1092
	void __iomem *vaddr_atomic;
	void *vaddr;
1093
	unsigned long unwritten;
1094

P
Peter Zijlstra 已提交
1095
	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
1096 1097 1098
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
1099
						      user_data, length);
P
Peter Zijlstra 已提交
1100
	io_mapping_unmap_atomic(vaddr_atomic);
1101
	return unwritten;
1102 1103
}

1104 1105 1106
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
1107
 * @i915: i915 device private data
1108 1109 1110
 * @obj: i915 gem object
 * @args: pwrite arguments structure
 * @file: drm file pointer
1111
 */
1112
static int
1113
i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
1114
			 struct drm_i915_gem_object *obj,
1115
			 struct drm_i915_gem_pwrite *args,
1116
			 struct drm_file *file)
1117
{
1118
	struct i915_ggtt *ggtt = &i915->ggtt;
1119
	struct drm_device *dev = obj->base.dev;
C
Chris Wilson 已提交
1120
	struct i915_vma *vma;
1121 1122
	struct drm_mm_node node;
	uint64_t remain, offset;
1123
	char __user *user_data;
1124
	int ret;
1125 1126
	bool hit_slow_path = false;

1127
	if (i915_gem_object_is_tiled(obj))
1128
		return -EFAULT;
D
Daniel Vetter 已提交
1129

C
Chris Wilson 已提交
1130
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1131
				       PIN_MAPPABLE | PIN_NONBLOCK);
1132 1133 1134
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
1135
		ret = i915_vma_put_fence(vma);
1136 1137 1138 1139 1140
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
C
Chris Wilson 已提交
1141
	if (IS_ERR(vma)) {
1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153
		ret = insert_mappable_node(i915, &node, PAGE_SIZE);
		if (ret)
			goto out;

		ret = i915_gem_object_get_pages(obj);
		if (ret) {
			remove_mappable_node(&node);
			goto out;
		}

		i915_gem_object_pin_pages(obj);
	}
D
Daniel Vetter 已提交
1154 1155 1156 1157 1158

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

1159
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
1160
	obj->dirty = true;
1161

1162 1163 1164 1165
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
1166 1167
		/* Operation in this page
		 *
1168 1169 1170
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
1171
		 */
1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
1185
		/* If we get a fault while copying data, then (presumably) our
1186 1187
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
1188 1189
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
1190
		 */
1191
		if (fast_user_write(&ggtt->mappable, page_base,
D
Daniel Vetter 已提交
1192
				    page_offset, user_data, page_length)) {
1193 1194
			hit_slow_path = true;
			mutex_unlock(&dev->struct_mutex);
1195
			if (slow_user_access(&ggtt->mappable,
1196 1197 1198 1199 1200 1201 1202 1203 1204
					     page_base,
					     page_offset, user_data,
					     page_length, true)) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto out_flush;
			}

			mutex_lock(&dev->struct_mutex);
D
Daniel Vetter 已提交
1205
		}
1206

1207 1208 1209
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
1210 1211
	}

1212
out_flush:
1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225
	if (hit_slow_path) {
		if (ret == 0 &&
		    (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
			/* The user has modified the object whilst we tried
			 * reading from it, and we now have no idea what domain
			 * the pages should be in. As we have just been touching
			 * them directly, flush everything back to the GTT
			 * domain.
			 */
			ret = i915_gem_object_set_to_gtt_domain(obj, false);
		}
	}

1226
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
D
Daniel Vetter 已提交
1227
out_unpin:
1228 1229 1230 1231 1232 1233 1234 1235
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size,
				       true);
		i915_gem_object_unpin_pages(obj);
		remove_mappable_node(&node);
	} else {
C
Chris Wilson 已提交
1236
		i915_vma_unpin(vma);
1237
	}
D
Daniel Vetter 已提交
1238
out:
1239
	return ret;
1240 1241
}

1242 1243 1244 1245
/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
1246
static int
1247 1248 1249 1250 1251
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
1252
{
1253
	char *vaddr;
1254
	int ret;
1255

1256
	if (unlikely(page_do_bit17_swizzling))
1257
		return -EINVAL;
1258

1259 1260 1261 1262
	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
1263 1264
	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
					user_data, page_length);
1265 1266 1267 1268
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);
1269

1270
	return ret ? -EFAULT : 0;
1271 1272
}

1273 1274
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
1275
static int
1276 1277 1278 1279 1280
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
1281
{
1282 1283
	char *vaddr;
	int ret;
1284

1285
	vaddr = kmap(page);
1286
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
1287 1288 1289
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
1290 1291
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
1292 1293
						user_data,
						page_length);
1294 1295 1296 1297 1298
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
1299 1300 1301
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
1302
	kunmap(page);
1303

1304
	return ret ? -EFAULT : 0;
1305 1306 1307
}

static int
1308 1309 1310 1311
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
1312 1313
{
	ssize_t remain;
1314 1315
	loff_t offset;
	char __user *user_data;
1316
	int shmem_page_offset, page_length, ret = 0;
1317
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
1318
	int hit_slowpath = 0;
1319
	unsigned int needs_clflush;
1320
	struct sg_page_iter sg_iter;
1321

1322
	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
1323 1324 1325
	if (ret)
		return ret;

1326 1327
	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
	user_data = u64_to_user_ptr(args->data_ptr);
1328
	offset = args->offset;
1329
	remain = args->size;
1330

1331 1332
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
1333
		struct page *page = sg_page_iter_page(&sg_iter);
1334
		int partial_cacheline_write;
1335

1336 1337 1338
		if (remain <= 0)
			break;

1339 1340 1341 1342 1343
		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
1344
		shmem_page_offset = offset_in_page(offset);
1345 1346 1347 1348 1349

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

1350 1351 1352
		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire patch. */
1353
		partial_cacheline_write = needs_clflush & CLFLUSH_BEFORE &&
1354 1355 1356
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

1357 1358 1359
		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

1360 1361 1362
		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
1363
					needs_clflush & CLFLUSH_AFTER);
1364 1365
		if (ret == 0)
			goto next_page;
1366 1367 1368

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
1369 1370 1371
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
1372
					needs_clflush & CLFLUSH_AFTER);
1373

1374
		mutex_lock(&dev->struct_mutex);
1375 1376

		if (ret)
1377 1378
			goto out;

1379
next_page:
1380
		remain -= page_length;
1381
		user_data += page_length;
1382
		offset += page_length;
1383 1384
	}

1385
out:
1386
	i915_gem_obj_finish_shmem_access(obj);
1387

1388
	if (hit_slowpath) {
1389 1390 1391 1392 1393
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
1394
		if (!(needs_clflush & CLFLUSH_AFTER) &&
1395
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1396
			if (i915_gem_clflush_object(obj, obj->pin_display))
1397
				needs_clflush |= CLFLUSH_AFTER;
1398
		}
1399
	}
1400

1401
	if (needs_clflush & CLFLUSH_AFTER)
1402
		i915_gem_chipset_flush(to_i915(dev));
1403

1404
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
1405
	return ret;
1406 1407 1408 1409
}

/**
 * Writes data to the object referenced by handle.
1410 1411 1412
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1413 1414 1415 1416 1417
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1418
		      struct drm_file *file)
1419
{
1420
	struct drm_i915_private *dev_priv = to_i915(dev);
1421
	struct drm_i915_gem_pwrite *args = data;
1422
	struct drm_i915_gem_object *obj;
1423 1424 1425 1426 1427 1428
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
1429
		       u64_to_user_ptr(args->data_ptr),
1430 1431 1432
		       args->size))
		return -EFAULT;

1433
	if (likely(!i915.prefault_disable)) {
1434
		ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
1435 1436 1437 1438
						   args->size);
		if (ret)
			return -EFAULT;
	}
1439

1440
	obj = i915_gem_object_lookup(file, args->handle);
1441 1442
	if (!obj)
		return -ENOENT;
1443

1444
	/* Bounds check destination. */
1445 1446
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
C
Chris Wilson 已提交
1447
		ret = -EINVAL;
1448
		goto err;
C
Chris Wilson 已提交
1449 1450
	}

C
Chris Wilson 已提交
1451 1452
	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

1453 1454 1455 1456 1457 1458 1459 1460 1461 1462
	ret = __unsafe_wait_rendering(obj, to_rps_client(file), false);
	if (ret)
		goto err;

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err_rpm;

D
Daniel Vetter 已提交
1463
	ret = -EFAULT;
1464 1465 1466 1467 1468 1469
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
1470 1471
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj)) {
1472
		ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
D
Daniel Vetter 已提交
1473 1474 1475
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
1476
	}
1477

1478
	if (ret == -EFAULT || ret == -ENOSPC) {
1479 1480
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
1481
		else
1482
			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
1483
	}
1484

1485
	i915_gem_object_put(obj);
1486
	mutex_unlock(&dev->struct_mutex);
1487 1488
	intel_runtime_pm_put(dev_priv);

1489
	return ret;
1490 1491 1492 1493 1494 1495

err_rpm:
	intel_runtime_pm_put(dev_priv);
err:
	i915_gem_object_put_unlocked(obj);
	return ret;
1496 1497
}

1498
static inline enum fb_op_origin
1499 1500
write_origin(struct drm_i915_gem_object *obj, unsigned domain)
{
1501 1502
	return (domain == I915_GEM_DOMAIN_GTT ?
		obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
1503 1504
}

1505
/**
1506 1507
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
1508 1509 1510
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1511 1512 1513
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1514
			  struct drm_file *file)
1515 1516
{
	struct drm_i915_gem_set_domain *args = data;
1517
	struct drm_i915_gem_object *obj;
1518 1519
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
1520 1521
	int ret;

1522
	/* Only handle setting domains to types used by the CPU. */
1523
	if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
1524 1525 1526 1527 1528 1529 1530 1531
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

1532
	obj = i915_gem_object_lookup(file, args->handle);
1533 1534
	if (!obj)
		return -ENOENT;
1535

1536 1537 1538 1539
	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
1540 1541 1542 1543 1544
	ret = __unsafe_wait_rendering(obj, to_rps_client(file), !write_domain);
	if (ret)
		goto err;

	ret = i915_mutex_lock_interruptible(dev);
1545
	if (ret)
1546
		goto err;
1547

1548
	if (read_domains & I915_GEM_DOMAIN_GTT)
1549
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1550
	else
1551
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1552

1553
	if (write_domain != 0)
1554
		intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
1555

1556
	i915_gem_object_put(obj);
1557 1558
	mutex_unlock(&dev->struct_mutex);
	return ret;
1559 1560 1561 1562

err:
	i915_gem_object_put_unlocked(obj);
	return ret;
1563 1564 1565 1566
}

/**
 * Called when user space has done writes to this buffer
1567 1568 1569
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1570 1571 1572
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1573
			 struct drm_file *file)
1574 1575
{
	struct drm_i915_gem_sw_finish *args = data;
1576
	struct drm_i915_gem_object *obj;
1577
	int err = 0;
1578

1579
	obj = i915_gem_object_lookup(file, args->handle);
1580 1581
	if (!obj)
		return -ENOENT;
1582 1583

	/* Pinned buffers may be scanout, so flush the cache */
1584 1585 1586 1587 1588 1589 1590
	if (READ_ONCE(obj->pin_display)) {
		err = i915_mutex_lock_interruptible(dev);
		if (!err) {
			i915_gem_object_flush_cpu_write_domain(obj);
			mutex_unlock(&dev->struct_mutex);
		}
	}
1591

1592 1593
	i915_gem_object_put_unlocked(obj);
	return err;
1594 1595 1596
}

/**
1597 1598 1599 1600 1601
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1602 1603 1604
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
1605 1606 1607 1608 1609 1610 1611 1612 1613 1614
 *
 * IMPORTANT:
 *
 * DRM driver writers who look a this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
1615 1616 1617
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1618
		    struct drm_file *file)
1619 1620
{
	struct drm_i915_gem_mmap *args = data;
1621
	struct drm_i915_gem_object *obj;
1622 1623
	unsigned long addr;

1624 1625 1626
	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

1627
	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1628 1629
		return -ENODEV;

1630 1631
	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
1632
		return -ENOENT;
1633

1634 1635 1636
	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
1637
	if (!obj->base.filp) {
1638
		i915_gem_object_put_unlocked(obj);
1639 1640 1641
		return -EINVAL;
	}

1642
	addr = vm_mmap(obj->base.filp, 0, args->size,
1643 1644
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
1645 1646 1647 1648
	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

1649
		if (down_write_killable(&mm->mmap_sem)) {
1650
			i915_gem_object_put_unlocked(obj);
1651 1652
			return -EINTR;
		}
1653 1654 1655 1656 1657 1658 1659
		vma = find_vma(mm, addr);
		if (vma)
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		up_write(&mm->mmap_sem);
1660 1661

		/* This may race, but that's ok, it only gets set */
1662
		WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
1663
	}
1664
	i915_gem_object_put_unlocked(obj);
1665 1666 1667 1668 1669 1670 1671 1672
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

1673 1674 1675 1676 1677 1678 1679 1680 1681 1682
static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
{
	u64 size;

	size = i915_gem_object_get_stride(obj);
	size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8;

	return size >> PAGE_SHIFT;
}

1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732
/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     single byte.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * Restrictions:
 *
 *  * snoopable objects cannot be accessed via the GTT. It can cause machine
 *    hangs on some architectures, corruption on others. An attempt to service
 *    a GTT page fault from a snoopable object will generate a SIGBUS.
 *
 *  * the object must be able to fit into RAM (physical memory, though no
 *    limited to the mappable aperture).
 *
 *
 * Caveats:
 *
 *  * a new GTT page fault will synchronize rendering from the GPU and flush
 *    all data to system memory. Subsequent access will not be synchronized.
 *
 *  * all mappings are revoked on runtime device suspend.
 *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require fence register for display and blitter access
 *    as well). Contention of the fence registers will cause the previous users
 *    to be unmapped and any new access will generate new page faults.
 *
 *  * running out of memory while servicing a fault may generate a SIGBUS,
 *    rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 1;
}

1733 1734
/**
 * i915_gem_fault - fault a page into the GTT
C
Chris Wilson 已提交
1735
 * @area: CPU VMA in question
1736
 * @vmf: fault info
1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747
 *
 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 *
 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
 */
int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
	struct i915_vma *vma;
	pgoff_t page_offset;
	unsigned int flags;
	int ret;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
		PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Try to flush the object off the GPU first without holding the lock.
	 * Upon acquiring the lock, we will perform our sanity checks and then
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
	ret = __unsafe_wait_rendering(obj, NULL, !write);
	if (ret)
		goto err;

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err_rpm;

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
		ret = -EFAULT;
		goto err_unlock;
	}

	/* If the object is smaller than a couple of partial vma, it is
	 * not worth only creating a single partial vma - we may as well
	 * clear enough space for the full object.
	 */
	flags = PIN_MAPPABLE;
	if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
		flags |= PIN_NONBLOCK | PIN_NONFAULT;

	/* Now pin it into the GTT as needed */
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
	if (IS_ERR(vma)) {
		struct i915_ggtt_view view;
		unsigned int chunk_size;

		/* Use a partial view if it is bigger than available space */
		chunk_size = MIN_CHUNK_PAGES;
		if (i915_gem_object_is_tiled(obj))
			chunk_size = max(chunk_size, tile_row_pages(obj));

		memset(&view, 0, sizeof(view));
		view.type = I915_GGTT_VIEW_PARTIAL;
		view.params.partial.offset = rounddown(page_offset, chunk_size);
		view.params.partial.size =
			min_t(unsigned int, chunk_size,
			      (area->vm_end - area->vm_start) / PAGE_SIZE -
			      view.params.partial.offset);

		/* If the partial covers the entire object, just create a
		 * normal VMA.
		 */
		if (chunk_size >= obj->base.size >> PAGE_SHIFT)
			view.type = I915_GGTT_VIEW_NORMAL;

		/* Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */
		obj->frontbuffer_ggtt_origin = ORIGIN_CPU;

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unlock;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto err_unpin;

	ret = i915_vma_get_fence(vma);
	if (ret)
		goto err_unpin;

	/* Finally, remap it using the new GTT offset */
	ret = remap_io_mapping(area,
			       area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
			       (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->mappable);
	if (ret)
		goto err_unpin;

	obj->fault_mappable = true;
err_unpin:
	__i915_vma_unpin(vma);
err_unlock:
	mutex_unlock(&dev->struct_mutex);
err_rpm:
	intel_runtime_pm_put(dev_priv);
err:
	switch (ret) {
	case -EIO:
		/*
		 * We eat errors when the gpu is terminally wedged to avoid
		 * userspace unduly crashing (gl has no provisions for mmaps to
		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
		 * and so needs to be reported.
		 */
		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}
	case -EAGAIN:
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
		 */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		ret = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		ret = VM_FAULT_OOM;
		break;
	case -ENOSPC:
	case -EFAULT:
		ret = VM_FAULT_SIGBUS;
		break;
	default:
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
		ret = VM_FAULT_SIGBUS;
		break;
	}
	return ret;
}
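
/*
 * Illustrative sketch (editor's example, not driver code): how the
 * partial-view maths in i915_gem_fault() picks a chunk for a faulting
 * page. With 4 KiB pages MIN_CHUNK_PAGES is 256, so a fault on page 300
 * of a large unfenced object maps the 1 MiB chunk starting at page 256.
 */
static inline pgoff_t __maybe_unused
example_partial_chunk_start(pgoff_t page_offset)
{
	/* mirrors view.params.partial.offset above, for the unfenced case */
	return rounddown(page_offset, MIN_CHUNK_PAGES);
}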

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	/* Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 */
	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (!obj->fault_mappable)
		return;

	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);

	/* Ensure that the CPU's PTE are revoked and there are not outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied by changing the PTE above *should* be
	 * sufficient, an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */
	wmb();

	obj->fault_mappable = false;
}

void
i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		i915_gem_release_mmap(obj);
}

/**
 * i915_gem_get_ggtt_size - return required global GTT size for an object
 * @dev_priv: i915 device
 * @size: object size
 * @tiling_mode: tiling mode
 *
 * Return the required global GTT size for an object, taking into account
 * potential fence register mapping.
 */
u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
			   u64 size, int tiling_mode)
{
	u64 ggtt_size;

	GEM_BUG_ON(size == 0);

	if (INTEL_GEN(dev_priv) >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (IS_GEN3(dev_priv))
		ggtt_size = 1024*1024;
	else
		ggtt_size = 512*1024;

	while (ggtt_size < size)
		ggtt_size <<= 1;

	return ggtt_size;
}
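
/*
 * Worked example (editor's note, not driver code): on gen3 an X-tiled
 * object of 600 KiB starts from the 1 MiB minimum and needs no further
 * doubling, whereas a 3 MiB object is rounded up to a 4 MiB fence region.
 * On gen4+ the object size is returned unchanged.
 */
static void __maybe_unused
example_ggtt_size_rounding(struct drm_i915_private *i915)
{
	if (IS_GEN3(i915)) {
		WARN_ON(i915_gem_get_ggtt_size(i915, 600 * 1024,
					       I915_TILING_X) != 1024 * 1024);
		WARN_ON(i915_gem_get_ggtt_size(i915, 3 * 1024 * 1024,
					       I915_TILING_X) != 4 * 1024 * 1024);
	}
}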

/**
 * i915_gem_get_ggtt_alignment - return required global GTT alignment
 * @dev_priv: i915 device
 * @size: object size
 * @tiling_mode: tiling mode
 * @fenced: is fenced alignment required or not
 *
 * Return the required global GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
				int tiling_mode, bool fenced)
{
	GEM_BUG_ON(size == 0);

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
}

static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	int err;

	err = drm_gem_create_mmap_offset(&obj->base);
	if (!err)
		return 0;

	/* We can idle the GPU locklessly to flush stale objects, but in order
	 * to claim that space for ourselves, we need to take the big
	 * struct_mutex to free the requests+objects and allocate our slot.
	 */
	err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
	if (err)
		return err;

	err = i915_mutex_lock_interruptible(&dev_priv->drm);
	if (!err) {
		i915_gem_retire_requests(dev_priv);
		err = drm_gem_create_mmap_offset(&obj->base);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	return err;
}

static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
}

int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	ret = i915_gem_object_create_mmap_offset(obj);
	if (ret == 0)
		*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

	i915_gem_object_put_unlocked(obj);
	return ret;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
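
/*
 * Illustrative userspace flow (editor's sketch): the fake offset returned
 * by the ioctl above is only meaningful as an mmap() offset on the drm fd:
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 *
 * Subsequent faults on ptr are then serviced by i915_gem_fault() above.
 */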

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.filp == NULL)
		return;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->madv = __I915_MADV_PURGED;
}

/* Try to discard unwanted pages */
static void
i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;

	switch (obj->madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
		/* fall through - truncating also marks the object purged */
	case __I915_MADV_PURGED:
		return;
	}

	if (obj->base.filp == NULL)
		return;

	mapping = obj->base.filp->f_mapping;
	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}
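
/*
 * Illustrative userspace trigger (editor's sketch): marking a buffer
 * purgeable via the madvise ioctl is what allows the shrinker to reach
 * the two helpers above and drop the backing storage under pressure:
 *
 *	struct drm_i915_gem_madvise arg = { .handle = handle,
 *					    .madv = I915_MADV_DONTNEED };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
 */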

static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct sgt_iter sgt_iter;
	struct page *page;
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (WARN_ON(ret)) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		i915_gem_clflush_object(obj, true);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	i915_gem_gtt_finish_object(obj);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	for_each_sgt_page(page, sgt_iter, obj->pages) {
		if (obj->dirty)
			set_page_dirty(page);

		if (obj->madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		put_page(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

int
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	const struct drm_i915_gem_object_ops *ops = obj->ops;

	if (obj->pages == NULL)
		return 0;

	if (obj->pages_pin_count)
		return -EBUSY;

	GEM_BUG_ON(obj->bind_count);

	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early. */
	list_del(&obj->global_list);

	if (obj->mapping) {
		void *ptr;

		ptr = ptr_mask_bits(obj->mapping);
		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
		else
			kunmap(kmap_to_page(ptr));

		obj->mapping = NULL;
	}

	ops->put_pages(obj);
	obj->pages = NULL;

	i915_gem_object_invalidate(obj);

	return 0;
}

static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	int page_count, i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sgt_iter sgt_iter;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	int ret;
	gfp_t gfp;

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	page_count = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping = obj->base.filp->f_mapping;
	gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
	gfp |= __GFP_NORETRY | __GFP_NOWARN;
	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		if (IS_ERR(page)) {
			i915_gem_shrink(dev_priv,
					page_count,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND |
					I915_SHRINK_PURGEABLE);
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		}
		if (IS_ERR(page)) {
			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 */
			i915_gem_shrink_all(dev_priv);
			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto err_pages;
			}
		}
#ifdef CONFIG_SWIOTLB
		if (swiotlb_nr_tbl()) {
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
			sg = sg_next(sg);
			continue;
		}
#endif
		if (!i || page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
	}
#ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
#endif
		sg_mark_end(sg);
	obj->pages = st;

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret)
		goto err_pages;

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj);

	if (i915_gem_object_is_tiled(obj) &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		i915_gem_object_pin_pages(obj);

	return 0;

err_pages:
	sg_mark_end(sg);
	for_each_sgt_page(page, sgt_iter, st)
		put_page(page);
	sg_free_table(st);
	kfree(st);

	/* shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_get_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_put_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int
i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	const struct drm_i915_gem_object_ops *ops = obj->ops;
	int ret;

	if (obj->pages)
		return 0;

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	BUG_ON(obj->pages_pin_count);

	ret = ops->get_pages(obj);
	if (ret)
		return ret;

	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);

	obj->get_page.sg = obj->pages->sgl;
	obj->get_page.last = 0;

	return 0;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
				 enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
	struct sg_table *sgt = obj->pages;
	struct sgt_iter sgt_iter;
	struct page *page;
	struct page *stack_pages[32];
	struct page **pages = stack_pages;
	unsigned long i = 0;
	pgprot_t pgprot;
	void *addr;

	/* A single page can always be kmapped */
	if (n_pages == 1 && type == I915_MAP_WB)
		return kmap(sg_page(sgt->sgl));

	if (n_pages > ARRAY_SIZE(stack_pages)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
		if (!pages)
			return NULL;
	}

	for_each_sgt_page(page, sgt_iter, sgt)
		pages[i++] = page;

	/* Check that we have the expected number of pages */
	GEM_BUG_ON(i != n_pages);

	switch (type) {
	case I915_MAP_WB:
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}
	addr = vmap(pages, n_pages, 0, pgprot);

	if (pages != stack_pages)
		drm_free_large(pages);

	return addr;
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);
	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ERR_PTR(ret);

	i915_gem_object_pin_pages(obj);
	pinned = obj->pages_pin_count > 1;

	ptr = ptr_unpack_bits(obj->mapping, has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			ret = -EBUSY;
			goto err;
		}

		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
		else
			kunmap(kmap_to_page(ptr));

		ptr = obj->mapping = NULL;
	}

	if (!ptr) {
		ptr = i915_gem_object_map(obj, type);
		if (!ptr) {
			ret = -ENOMEM;
			goto err;
		}

		obj->mapping = ptr_pack_bits(ptr, type);
	}

	return ptr;

err:
	i915_gem_object_unpin_pages(obj);
	return ERR_PTR(ret);
}
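
/*
 * Illustrative sketch (editor's example, not driver code): the typical
 * pin_map/unpin_map pairing under struct_mutex. Requesting a second map
 * type while the first mapping is still pinned elsewhere fails with
 * -EBUSY, as enforced above.
 */
static int __maybe_unused
example_clear_object(struct drm_i915_gem_object *obj)
{
	void *vaddr;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, 0, obj->base.size);

	i915_gem_object_unpin_map(obj);
	return 0;
}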

static void
i915_gem_object_retire__write(struct i915_gem_active *active,
			      struct drm_i915_gem_request *request)
{
	struct drm_i915_gem_object *obj =
		container_of(active, struct drm_i915_gem_object, last_write);

	intel_fb_obj_flush(obj, true, ORIGIN_CS);
}

static void
i915_gem_object_retire__read(struct i915_gem_active *active,
			     struct drm_i915_gem_request *request)
{
	int idx = request->engine->id;
	struct drm_i915_gem_object *obj =
		container_of(active, struct drm_i915_gem_object, last_read[idx]);

	GEM_BUG_ON(!i915_gem_object_has_active_engine(obj, idx));

	i915_gem_object_clear_active(obj, idx);
	if (i915_gem_object_is_active(obj))
		return;

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	if (obj->bind_count)
		list_move_tail(&obj->global_list,
			       &request->i915->mm.bound_list);

	i915_gem_object_put(obj);
}

static bool i915_context_is_banned(const struct i915_gem_context *ctx)
{
	unsigned long elapsed;

	if (ctx->hang_stats.banned)
		return true;

	elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
	if (ctx->hang_stats.ban_period_seconds &&
	    elapsed <= ctx->hang_stats.ban_period_seconds) {
		DRM_DEBUG("context hanging too fast, banning!\n");
		return true;
	}

	return false;
}

static void i915_set_reset_status(struct i915_gem_context *ctx,
				  const bool guilty)
{
	struct i915_ctx_hang_stats *hs = &ctx->hang_stats;

	if (guilty) {
		hs->banned = i915_context_is_banned(ctx);
		hs->batch_active++;
		hs->guilty_ts = get_seconds();
	} else {
		hs->batch_pending++;
	}
}

struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request;

	/* We are called by the error capture and reset at a random
	 * point in time. In particular, note that neither is crucially
	 * ordered with an interrupt. After a hang, the GPU is dead and we
	 * assume that no more writes can happen (we waited long enough for
	 * all writes that were in transaction to be flushed) - adding an
	 * extra delay for a recent interrupt is pointless. Hence, we do
	 * not need an engine->irq_seqno_barrier() before the seqno reads.
	 */
	list_for_each_entry(request, &engine->request_list, link) {
		if (i915_gem_request_completed(request))
			continue;

		if (!i915_sw_fence_done(&request->submit))
			break;

		return request;
	}

	return NULL;
}

static void reset_request(struct drm_i915_gem_request *request)
{
	void *vaddr = request->ring->vaddr;
	u32 head;

	/* As this request likely depends on state from the lost
	 * context, clear out all the user operations leaving the
	 * breadcrumb at the end (so we get the fence notifications).
	 */
	head = request->head;
	if (request->postfix < head) {
		memset(vaddr + head, 0, request->ring->size - head);
		head = 0;
	}
	memset(vaddr + head, 0, request->postfix - head);
}

static void i915_gem_reset_engine(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request;
	struct i915_gem_context *incomplete_ctx;
	bool ring_hung;

	if (engine->irq_seqno_barrier)
		engine->irq_seqno_barrier(engine);

	request = i915_gem_find_active_request(engine);
	if (!request)
		return;

	ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
	if (engine->hangcheck.seqno != intel_engine_get_seqno(engine))
		ring_hung = false;

	i915_set_reset_status(request->ctx, ring_hung);
	if (!ring_hung)
		return;

	DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
			 engine->name, request->fence.seqno);

	/* Setup the CS to resume from the breadcrumb of the hung request */
	engine->reset_hw(engine, request);

	/* Users of the default context do not rely on logical state
	 * preserved between batches. They have to emit full state on
	 * every batch and so it is safe to execute queued requests following
	 * the hang.
	 *
	 * Other contexts preserve state, now corrupt. We want to skip all
	 * queued requests that reference the corrupt context.
	 */
	incomplete_ctx = request->ctx;
	if (i915_gem_context_is_default(incomplete_ctx))
		return;

	list_for_each_entry_continue(request, &engine->request_list, link)
		if (request->ctx == incomplete_ctx)
			reset_request(request);
}

void i915_gem_reset(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	i915_gem_retire_requests(dev_priv);

	for_each_engine(engine, dev_priv)
		i915_gem_reset_engine(engine);

	i915_gem_restore_fences(&dev_priv->drm);

	if (dev_priv->gt.awake) {
		intel_sanitize_gt_powersave(dev_priv);
		intel_enable_gt_powersave(dev_priv);
		if (INTEL_GEN(dev_priv) >= 6)
			gen6_rps_busy(dev_priv);
	}
}

static void nop_submit_request(struct drm_i915_gem_request *request)
{
}

static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
{
	engine->submit_request = nop_submit_request;

	/* Mark all pending requests as complete so that any concurrent
	 * (lockless) lookup doesn't try and wait upon the request as we
	 * reset it.
	 */
	intel_engine_init_seqno(engine, engine->last_submitted_seqno);

	/*
	 * Clear the execlists queue up before freeing the requests, as those
	 * are the ones that keep the context and ringbuffer backing objects
	 * pinned in place.
	 */

	if (i915.enable_execlists) {
		spin_lock(&engine->execlist_lock);
		INIT_LIST_HEAD(&engine->execlist_queue);
		i915_gem_request_put(engine->execlist_port[0].request);
		i915_gem_request_put(engine->execlist_port[1].request);
		memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
		spin_unlock(&engine->execlist_lock);
	}

	engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
}

void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);
	set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);

	i915_gem_context_lost(dev_priv);
	for_each_engine(engine, dev_priv)
		i915_gem_cleanup_engine(engine);
	mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);

	i915_gem_retire_requests(dev_priv);
}

static void
i915_gem_retire_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), gt.retire_work.work);
	struct drm_device *dev = &dev_priv->drm;

	/* Come back later if the device is busy... */
	if (mutex_trylock(&dev->struct_mutex)) {
		i915_gem_retire_requests(dev_priv);
		mutex_unlock(&dev->struct_mutex);
	}

	/* Keep the retire handler running until we are finally idle.
	 * We do not need to do this test under locking as in the worst-case
	 * we queue the retire worker once too often.
	 */
	if (READ_ONCE(dev_priv->gt.awake)) {
		i915_queue_hangcheck(dev_priv);
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->gt.retire_work,
				   round_jiffies_up_relative(HZ));
	}
}

static void
i915_gem_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), gt.idle_work.work);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	bool rearm_hangcheck;

	if (!READ_ONCE(dev_priv->gt.awake))
		return;

	if (READ_ONCE(dev_priv->gt.active_engines))
		return;

	rearm_hangcheck =
		cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);

	if (!mutex_trylock(&dev->struct_mutex)) {
		/* Currently busy, come back later */
		mod_delayed_work(dev_priv->wq,
				 &dev_priv->gt.idle_work,
				 msecs_to_jiffies(50));
		goto out_rearm;
	}

	if (dev_priv->gt.active_engines)
		goto out_unlock;

	for_each_engine(engine, dev_priv)
		i915_gem_batch_pool_fini(&engine->batch_pool);

	GEM_BUG_ON(!dev_priv->gt.awake);
	dev_priv->gt.awake = false;
	rearm_hangcheck = false;

	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_idle(dev_priv);
	intel_runtime_pm_put(dev_priv);
out_unlock:
	mutex_unlock(&dev->struct_mutex);

out_rearm:
	if (rearm_hangcheck) {
		GEM_BUG_ON(!dev_priv->gt.awake);
		i915_queue_hangcheck(dev_priv);
	}
}

void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem);
	struct drm_i915_file_private *fpriv = file->driver_priv;
	struct i915_vma *vma, *vn;

	mutex_lock(&obj->base.dev->struct_mutex);
	list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
		if (vma->vm->file == fpriv)
			i915_vma_close(vma);
	mutex_unlock(&obj->base.dev->struct_mutex);
}

/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * Returns 0 if successful, else an error is returned with the remaining time in
 * the timeout parameter.
 *  -ETIME: object is still busy after timeout
 *  -ERESTARTSYS: signal interrupted the wait
 *  -ENOENT: object doesn't exist
 * Also possible, but rare:
 *  -EAGAIN: GPU wedged
 *  -ENOMEM: damn
 *  -ENODEV: Internal IRQ fail
 *  -E?: The add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 * without holding struct_mutex the object may become re-busied before this
 * function completes. A similar but shorter race condition exists in the busy
 * ioctl.
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_wait *args = data;
	struct intel_rps_client *rps = to_rps_client(file);
	struct drm_i915_gem_object *obj;
	unsigned long active;
	int idx, ret = 0;

	if (args->flags != 0)
		return -EINVAL;

	obj = i915_gem_object_lookup(file, args->bo_handle);
	if (!obj)
		return -ENOENT;

	active = __I915_BO_ACTIVE(obj);
	for_each_active(active, idx) {
		s64 *timeout = args->timeout_ns >= 0 ? &args->timeout_ns : NULL;
		ret = i915_gem_active_wait_unlocked(&obj->last_read[idx],
						    I915_WAIT_INTERRUPTIBLE,
						    timeout, rps);
		if (ret)
			break;
	}

	i915_gem_object_put_unlocked(obj);
	return ret;
}
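
/*
 * Illustrative userspace usage (editor's sketch): with timeout_ns == 0 the
 * wait ioctl doubles as a non-blocking busy query:
 *
 *	struct drm_i915_gem_wait arg = { .bo_handle = handle,
 *					 .timeout_ns = 0 };
 *	int busy = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &arg) == -1 &&
 *		   errno == ETIME;
 */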

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

int i915_vma_unbind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	unsigned long active;
	int ret;

	/* First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	active = i915_vma_get_active(vma);
	if (active) {
		int idx;

		/* When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 */
		__i915_vma_pin(vma);

		for_each_active(active, idx) {
			ret = i915_gem_active_retire(&vma->last_read[idx],
						   &vma->vm->dev->struct_mutex);
			if (ret)
				break;
		}

		__i915_vma_unpin(vma);
		if (ret)
			return ret;

		GEM_BUG_ON(i915_vma_is_active(vma));
	}

	if (i915_vma_is_pinned(vma))
		return -EBUSY;

	if (!drm_mm_node_allocated(&vma->node))
		goto destroy;

	GEM_BUG_ON(obj->bind_count == 0);
	GEM_BUG_ON(!obj->pages);

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* release the fence reg _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_gem_release_mmap(obj);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->vm->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

	if (vma->pages != obj->pages) {
		GEM_BUG_ON(!vma->pages);
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
	vma->pages = NULL;

	/* Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	if (--obj->bind_count == 0)
		list_move_tail(&obj->global_list,
			       &to_i915(obj->base.dev)->mm.unbound_list);

	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
	 * reaped by the shrinker.
	 */
	i915_gem_object_unpin_pages(obj);

destroy:
	if (unlikely(i915_vma_is_closed(vma)))
		i915_vma_destroy(vma);

	return 0;
}

int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
			   unsigned int flags)
{
	struct intel_engine_cs *engine;
	int ret;

	for_each_engine(engine, dev_priv) {
		if (engine->last_context == NULL)
			continue;

		ret = intel_engine_idle(engine, flags);
		if (ret)
			return ret;
	}

	return 0;
}

static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
				     unsigned long cache_level)
{
	struct drm_mm_node *gtt_space = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	if (!drm_mm_node_allocated(gtt_space))
		return true;

	if (list_empty(&gtt_space->node_list))
		return true;

	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
	if (other->allocated && !other->hole_follows && other->color != cache_level)
		return false;

	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
		return false;

	return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
	struct drm_i915_gem_object *obj = vma->obj;
	u64 start, end;
	int ret;

	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	if (flags & PIN_MAPPABLE)
		size = i915_gem_get_ggtt_size(dev_priv, size,
					      i915_gem_object_get_tiling(obj));

	alignment = max(max(alignment, vma->display_alignment),
			i915_gem_get_ggtt_alignment(dev_priv, size,
						    i915_gem_object_get_tiling(obj),
						    flags & PIN_MAPPABLE));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
			  size, obj->base.size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -E2BIG;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (offset & (alignment - 1) || offset > end - size) {
			ret = -EINVAL;
			goto err_unpin;
		}

		vma->node.start = offset;
		vma->node.size = size;
		vma->node.color = obj->cache_level;
		ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
		if (ret) {
			ret = i915_gem_evict_for_vma(vma);
			if (ret == 0)
				ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
			if (ret)
				goto err_unpin;
		}
	} else {
		u32 search_flag, alloc_flag;

		if (flags & PIN_HIGH) {
			search_flag = DRM_MM_SEARCH_BELOW;
			alloc_flag = DRM_MM_CREATE_TOP;
		} else {
			search_flag = DRM_MM_SEARCH_DEFAULT;
			alloc_flag = DRM_MM_CREATE_DEFAULT;
		}

		/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
		 * so we know that we always have a minimum alignment of 4096.
		 * The drm_mm range manager is optimised to return results
		 * with zero alignment, so where possible use the optimal
		 * path.
		 */
		if (alignment <= 4096)
			alignment = 0;

search_free:
		ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
							  &vma->node,
							  size, alignment,
							  obj->cache_level,
							  start, end,
							  search_flag,
							  alloc_flag);
		if (ret) {
			ret = i915_gem_evict_something(vma->vm, size, alignment,
						       obj->cache_level,
						       start, end,
						       flags);
			if (ret == 0)
				goto search_free;

			goto err_unpin;
		}
	}
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));

	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	obj->bind_count++;

	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}
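
/*
 * Illustrative sketch (editor's note): PIN_OFFSET_FIXED packs the requested
 * GGTT offset into the flags word itself, masked by PIN_OFFSET_MASK, e.g.
 * for a hypothetical caller that must place an object at a fixed mappable
 * offset:
 *
 *	vma = i915_gem_object_ggtt_pin(obj, NULL, size, 0,
 *				       PIN_MAPPABLE | PIN_OFFSET_FIXED |
 *				       (offset & PIN_OFFSET_MASK));
 */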

bool
i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			bool force)
{
	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj->pages == NULL)
		return false;

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 */
	if (obj->stolen || obj->phys_handle)
		return false;

	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines.  However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated.  As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
		obj->cache_dirty = true;
		return false;
	}

	trace_i915_gem_object_clflush(obj);
	drm_clflush_sg(obj->pages);
	obj->cache_dirty = false;

	return true;
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain.  Writes
	 * to it "immediately" go to main memory as far as we know, so there's
	 * no chipset flush.  It also doesn't land in render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour).
	 */
	wmb();
	if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
		POSTING_READ(RING_ACTHD(dev_priv->engine[RCS].mmio_base));

	intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));

	obj->base.write_domain = 0;
	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    I915_GEM_DOMAIN_GTT);
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		return;

	if (i915_gem_clflush_object(obj, obj->pin_display))
		i915_gem_chipset_flush(to_i915(obj->base.dev));

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);

	obj->base.write_domain = 0;
	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    I915_GEM_DOMAIN_CPU);
}

static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!i915_vma_is_ggtt(vma))
			continue;

		if (i915_vma_is_active(vma))
			continue;

		if (!drm_mm_node_allocated(&vma->node))
			continue;

		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	}
}

/**
 * Moves a single object to the GTT read, and possibly write domain.
 * @obj: object to act on
 * @write: ask for write access or read only
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
		return 0;

	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_flush_cpu_write_domain(obj);

	/* Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * GTT domain upon first access.
	 */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		mb();

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
		obj->dirty = 1;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	/* And bump the LRU for this access */
	i915_gem_object_bump_inactive_ggtt(obj);

	return 0;
}

/**
 * Changes the cache-level of an object across all VMA.
 * @obj: object to act on
 * @cache_level: new cache level to set for the object
 *
 * After this function returns, the object will be in the new cache-level
 * across all GTT and the contents of the backing storage will be coherent,
 * with respect to the new cache-level. In order to keep the backing storage
 * coherent for all users, we only allow a single cache level to be set
 * globally on the object and prevent it from being changed whilst the
 * hardware is reading from the object. That is if the object is currently
 * on the scanout it will be set to uncached (or equivalent display
 * cache coherency) and all non-MOCS GPU access will also be uncached so
 * that all direct access to the scanout remains coherent.
 */
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level)
{
	struct i915_vma *vma;
	int ret = 0;

	if (obj->cache_level == cache_level)
		goto out;

	/* Inspect the list of currently bound VMA and unbind any that would
	 * be invalid given the new cache-level. This is principally to
	 * catch the issue of the CS prefetch crossing page boundaries and
	 * reading an invalid PTE on older architectures.
	 */
restart:
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_pinned(vma)) {
			DRM_DEBUG("can not change the cache level of pinned objects\n");
			return -EBUSY;
		}

		if (i915_gem_valid_gtt_space(vma, cache_level))
			continue;

		ret = i915_vma_unbind(vma);
		if (ret)
			return ret;

		/* As unbinding may affect other elements in the
		 * obj->vma_list (due to side-effects from retiring
		 * an active vma), play safe and restart the iterator.
		 */
		goto restart;
	}

	/* We can reuse the existing drm_mm nodes but need to change the
	 * cache-level on the PTE. We could simply unbind them all and
	 * rebind with the correct cache-level on next use. However since
	 * we already have a valid slot, dma mapping, pages etc, we may as
	 * well rewrite the PTE in the belief that doing so tramples upon
	 * less state and so involves less work.
	 */
	if (obj->bind_count) {
		/* Before we change the PTE, the GPU must not be accessing it.
		 * If we wait upon the object, we know that all the bound
		 * VMA are no longer active.
		 */
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;

		if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
			/* Access to snoopable pages through the GTT is
			 * incoherent and on some machines causes a hard
			 * lockup. Relinquish the CPU mmapping to force
			 * userspace to refault in the pages and we can
			 * then double check if the GTT mapping is still
			 * valid for that pointer access.
			 */
			i915_gem_release_mmap(obj);

			/* As we no longer need a fence for GTT access,
			 * we can relinquish it now (and so prevent having
			 * to steal a fence from someone else on the next
			 * fence request). Note GPU activity would have
			 * dropped the fence as all snoopable access is
			 * supposed to be linear.
			 */
			list_for_each_entry(vma, &obj->vma_list, obj_link) {
				ret = i915_vma_put_fence(vma);
				if (ret)
					return ret;
			}
		} else {
			/* We either have incoherent backing store and
			 * so no GTT access or the architecture is fully
			 * coherent. In such cases, existing GTT mmaps
			 * ignore the cache bit in the PTE and we can
			 * rewrite it without confusing the GPU or having
			 * to force userspace to fault back in its mmaps.
			 */
		}

		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
			if (ret)
				return ret;
		}
	}

	list_for_each_entry(vma, &obj->vma_list, obj_link)
		vma->node.color = cache_level;
	obj->cache_level = cache_level;

out:
	/* Flush the dirty CPU caches to the backing storage so that the
	 * object is now coherent at its new cache level (with respect
	 * to the access domain).
	 */
	if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
		if (i915_gem_clflush_object(obj, true))
			i915_gem_chipset_flush(to_i915(obj->base.dev));
	}

	return 0;
}

int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	switch (obj->cache_level) {
	case I915_CACHE_LLC:
	case I915_CACHE_L3_LLC:
		args->caching = I915_CACHING_CACHED;
		break;

	case I915_CACHE_WT:
		args->caching = I915_CACHING_DISPLAY;
		break;

	default:
		args->caching = I915_CACHING_NONE;
		break;
	}

	i915_gem_object_put_unlocked(obj);
	return 0;
}

int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret;

	switch (args->caching) {
	case I915_CACHING_NONE:
		level = I915_CACHE_NONE;
		break;
	case I915_CACHING_CACHED:
		/*
		 * Due to a HW issue on BXT A stepping, GPU stores via a
		 * snooped mapping may leave stale data in a corresponding CPU
		 * cacheline, whereas normally such cachelines would get
		 * invalidated.
		 */
		if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
			return -ENODEV;

		level = I915_CACHE_LLC;
		break;
	case I915_CACHING_DISPLAY:
		level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
		break;
	default:
		return -EINVAL;
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto rpm_put;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = i915_gem_object_set_cache_level(obj, level);

	i915_gem_object_put(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
rpm_put:
	intel_runtime_pm_put(dev_priv);

	return ret;
}
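
/*
 * Illustrative userspace usage (editor's sketch): requesting a snooped
 * (CPU-cached) mapping policy for a buffer via the caching ioctl above:
 *
 *	struct drm_i915_gem_caching arg = { .handle = handle,
 *					    .caching = I915_CACHING_CACHED };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 */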

/*
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
 */
struct i915_vma *
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	u32 old_read_domains, old_write_domain;
	int ret;

	/* Mark the pin_display early so that we account for the
	 * display coherency whilst setting up the cache domains.
	 */
	obj->pin_display++;

	/* The display engine is not coherent with the LLC cache on gen6.  As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is lowest common denominator for all
	 * chipsets.
	 *
	 * However for gen6+, we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
	ret = i915_gem_object_set_cache_level(obj,
					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err_unpin_display;
	}

	/* As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
	 * always use map_and_fenceable for all scanout buffers. However,
	 * it may simply be too big to fit into mappable, in which case
	 * put it anyway and hope that userspace can cope (but always first
	 * try to preserve the existing ABI).
	 */
	vma = ERR_PTR(-ENOSPC);
	if (view->type == I915_GGTT_VIEW_NORMAL)
		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
					       PIN_MAPPABLE | PIN_NONBLOCK);
	if (IS_ERR(vma))
		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 0);
	if (IS_ERR(vma))
		goto err_unpin_display;

	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);

	WARN_ON(obj->pin_display > i915_vma_pin_count(vma));

	i915_gem_object_flush_cpu_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	obj->base.write_domain = 0;
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return vma;

err_unpin_display:
	obj->pin_display--;
	return vma;
}

void
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{
	if (WARN_ON(vma->obj->pin_display == 0))
		return;

	if (--vma->obj->pin_display == 0)
		vma->display_alignment = 0;

	/* Bump the LRU to try and avoid premature eviction whilst flipping */
	if (!i915_vma_is_active(vma))
		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);

	i915_vma_unpin(vma);
	WARN_ON(vma->obj->pin_display > i915_vma_pin_count(vma));
}
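
/*
 * Illustrative pairing (a sketch, not a call site in this file): a
 * modeset path pins the framebuffer before programming the plane and
 * unpins it once the flip away has completed, e.g.
 *
 *	vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	... program the plane with i915_ggtt_offset(vma) ...
 *	i915_gem_object_unpin_from_display_plane(vma);
 */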

/**
 * Moves a single object to the CPU read, and possibly write, domain.
 * @obj: object to act on
 * @write: requesting write or read-only access
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return 0;

	i915_gem_object_flush_gtt_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj, false);

		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}
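
/*
 * Illustrative caller (a sketch): CPU writes must be bracketed by a
 * move into the CPU write domain so that stale cachelines are flushed
 * and the GPU domains are invalidated on next use, e.g.
 *
 *	ret = i915_gem_object_set_to_cpu_domain(obj, true);
 *	if (ret)
 *		return ret;
 *	... write to the object's backing pages ...
 */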

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
	struct drm_i915_gem_request *request, *target = NULL;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	/* ABI: return -EIO if already wedged */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return -EIO;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		/*
		 * Note that the request might not have been submitted yet,
		 * in which case emitted_jiffies will be zero.
		 */
		if (!request->emitted_jiffies)
			continue;

		target = request;
	}
	if (target)
		i915_gem_request_get(target);
	spin_unlock(&file_priv->mm.lock);

	if (target == NULL)
		return 0;

	ret = i915_wait_request(target, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
	i915_gem_request_put(target);

	return ret;
}
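
/*
 * Worked example (illustrative): DRM_I915_THROTTLE_JIFFIES corresponds
 * to 20ms, so with HZ=100 recent_enough is jiffies - 2. A request
 * emitted at jiffies - 3 is older than that and becomes the wait
 * target; one emitted at jiffies - 1 terminates the scan. We therefore
 * block on the most recent request that is more than 20ms old.
 */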

static bool
i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	if (alignment && vma->node.start & (alignment - 1))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	bool mappable, fenceable;
	u32 fence_size, fence_alignment;

	fence_size = i915_gem_get_ggtt_size(dev_priv,
					    vma->size,
					    i915_gem_object_get_tiling(obj));
	fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
						      vma->size,
						      i915_gem_object_get_tiling(obj),
						      true);

	fenceable = (vma->node.size == fence_size &&
		     (vma->node.start & (fence_alignment - 1)) == 0);

	mappable = (vma->node.start + fence_size <=
		    dev_priv->ggtt.mappable_end);

	if (mappable && fenceable)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}
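
/*
 * Worked example (illustrative numbers): if tiling demands
 * fence_size = 1MiB with fence_alignment = 1MiB, a 1MiB node at
 * offset 0x300000 is fenceable (exact size, aligned start), and with
 * a 256MiB mappable aperture it is also mappable since
 * 0x300000 + 1MiB <= 256MiB, so I915_VMA_CAN_FENCE gets set.
 */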

int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	unsigned int bound = vma->flags;
	int ret;

	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err;
	}

	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
	if (ret)
		goto err;

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err:
	__i915_vma_unpin(vma);
	return ret;
}

struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags)
{
	struct i915_address_space *vm = &to_i915(obj->base.dev)->ggtt.base;
	struct i915_vma *vma;
	int ret;

	vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
	if (IS_ERR(vma))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK &&
		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
			return ERR_PTR(-ENOSPC);

		WARN(i915_vma_is_pinned(vma),
		     "bo is already pinned in ggtt with incorrect alignment:"
		     " offset=%08x, req.alignment=%llx,"
		     " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
		     i915_ggtt_offset(vma), alignment,
		     !!(flags & PIN_MAPPABLE),
		     i915_vma_is_map_and_fenceable(vma));
		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);

	return vma;
}
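
/*
 * Illustrative caller (a sketch, parameters invented): pinning an
 * object into the mappable GGTT with page alignment, using the default
 * (normal) view:
 *
 *	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 4096, PIN_MAPPABLE);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */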

static __always_inline unsigned int __busy_read_flag(unsigned int id)
{
	/* Note that we could alias engines in the execbuf API, but
	 * that would be very unwise as it prevents userspace from having
	 * fine control over engine selection. Ahem.
	 *
	 * This should be something like EXEC_MAX_ENGINE instead of
	 * I915_NUM_ENGINES.
	 */
	BUILD_BUG_ON(I915_NUM_ENGINES > 16);
	return 0x10000 << id;
}

static __always_inline unsigned int __busy_write_id(unsigned int id)
{
	/* The uABI guarantees an active writer is also amongst the read
	 * engines. This would be true if we accessed the activity tracking
	 * under the lock, but as we perform the lookup of the object and
	 * its activity locklessly we cannot guarantee that the last_write
	 * being active implies that we have set the same engine flag from
	 * last_read - hence we always set both read and write busy for
	 * last_write.
	 */
	return id | __busy_read_flag(id);
}
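
/*
 * Worked example (illustrative): for engine id 2, __busy_read_flag()
 * returns 0x10000 << 2 == 0x40000, placing the read flag in the upper
 * 16 bits, while __busy_write_id() returns 0x40002 - the write id in
 * the low 16 bits plus the matching read flag.
 */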

static __always_inline unsigned int
__busy_set_if_active(const struct i915_gem_active *active,
		     unsigned int (*flag)(unsigned int id))
{
	struct drm_i915_gem_request *request;

	request = rcu_dereference(active->request);
	if (!request || i915_gem_request_completed(request))
		return 0;

	/* This is racy. See __i915_gem_active_get_rcu() for a detailed
	 * discussion of how to handle the race correctly, but for reporting
	 * the busy state we err on the side of potentially reporting the
	 * wrong engine as being busy (but we guarantee that the result
	 * is at least self-consistent).
	 *
	 * As we use SLAB_DESTROY_BY_RCU, the request may be reallocated
	 * whilst we are inspecting it, even under the RCU read lock as we are.
	 * This means that there is a small window for the engine and/or the
	 * seqno to have been overwritten. The seqno will always be in the
	 * future compared to the intended one, and so we know that if that
	 * seqno is idle (on whatever engine) our request is idle and the
	 * return 0 above is correct.
	 *
	 * The issue is that if the engine is switched, it is just as likely
	 * to report that it is busy (but since the switch happened, we know
	 * the request should be idle). So there is a small chance that a busy
	 * result is actually the wrong engine.
	 *
	 * So why don't we care?
	 *
	 * For starters, the busy ioctl is a heuristic that is by definition
	 * racy. Even with perfect serialisation in the driver, the hardware
	 * state is constantly advancing - the state we report to the user
	 * is stale.
	 *
	 * The critical information for the busy-ioctl is whether the object
	 * is idle as userspace relies on that to detect whether its next
	 * access will stall, or if it has missed submitting commands to
	 * the hardware allowing the GPU to stall. We never generate a
	 * false-positive for idleness, thus busy-ioctl is reliable at the
	 * most fundamental level, and we maintain the guarantee that a
	 * busy object left to itself will eventually become idle (and stay
	 * idle!).
	 *
	 * We allow ourselves the leeway of potentially misreporting the busy
	 * state because that is an optimisation heuristic that is constantly
	 * in flux. Being quickly able to detect the busy/idle state is much
	 * more important than accurate logging of exactly which engines were
	 * busy.
	 *
	 * For accuracy in reporting the engine, we could use
	 *
	 *	result = 0;
	 *	request = __i915_gem_active_get_rcu(active);
	 *	if (request) {
	 *		if (!i915_gem_request_completed(request))
	 *			result = flag(request->engine->exec_id);
	 *		i915_gem_request_put(request);
	 *	}
	 *
	 * but that still remains susceptible to both hardware and userspace
	 * races. So we accept making the result of that race slightly worse,
	 * given the rarity of the race and its low impact on the result.
	 */
	return flag(READ_ONCE(request->engine->exec_id));
}

static __always_inline unsigned int
busy_check_reader(const struct i915_gem_active *active)
{
	return __busy_set_if_active(active, __busy_read_flag);
}

static __always_inline unsigned int
busy_check_writer(const struct i915_gem_active *active)
{
	return __busy_set_if_active(active, __busy_write_id);
}

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long active;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	args->busy = 0;
	active = __I915_BO_ACTIVE(obj);
	if (active) {
		int idx;

		/* Yes, the lookups are intentionally racy.
		 *
		 * First, we cannot simply rely on __I915_BO_ACTIVE. We have
		 * to regard the value as stale and as our ABI guarantees
		 * forward progress, we confirm the status of each active
		 * request with the hardware.
		 *
		 * Even though we guard the pointer lookup by RCU, that only
		 * guarantees that the pointer and its contents remain
		 * dereferenceable and does *not* mean that the request we
		 * have is the same as the one being tracked by the object.
		 *
		 * Consider that we lookup the request just as it is being
		 * retired and freed. We take a local copy of the pointer,
		 * but before we add its engine into the busy set, the other
		 * thread reallocates it and assigns it to a task on another
		 * engine with a fresh and incomplete seqno. Guarding against
		 * that requires careful serialisation and reference counting,
		 * i.e. using __i915_gem_active_get_request_rcu(). We don't;
		 * instead we expect that if the result is busy, which engines
		 * are busy is not completely reliable - we only guarantee
		 * that the object was busy.
		 */
		rcu_read_lock();

		for_each_active(active, idx)
			args->busy |= busy_check_reader(&obj->last_read[idx]);

		/* For ABI sanity, we only care that the write engine is in
		 * the set of read engines. This should be ensured by the
		 * ordering of setting last_read/last_write in
		 * i915_vma_move_to_active(), and then in reverse in retire.
		 * However, for good measure, we always report the last_write
		 * request as a busy read as well as being a busy write.
		 *
		 * We don't care that the set of active read/write engines
		 * may change during construction of the result, as it is
		 * equally liable to change before userspace can inspect
		 * the result.
		 */
		args->busy |= busy_check_writer(&obj->last_write);

		rcu_read_unlock();
	}

	i915_gem_object_put_unlocked(obj);
	return 0;
}
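
/*
 * Userspace-side sketch (illustrative): decoding args->busy per the
 * flag layout above - write engine id in the low word, per-engine
 * read flags in the high word:
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 *	if (busy.busy == 0)
 *		... the object is idle ...
 */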

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
	    break;
	default:
	    return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->pages &&
	    i915_gem_object_is_tiled(obj) &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->madv == I915_MADV_WILLNEED)
			i915_gem_object_unpin_pages(obj);
		if (args->madv == I915_MADV_WILLNEED)
			i915_gem_object_pin_pages(obj);
	}

	if (obj->madv != __I915_MADV_PURGED)
		obj->madv = args->madv;

	/* if the object is no longer attached, discard its backing storage */
	if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj->madv != __I915_MADV_PURGED;

	i915_gem_object_put(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
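
/*
 * Illustrative flow (a sketch): a userspace BO cache marks idle
 * buffers purgeable and must check "retained" when taking them back,
 * as the shrinker may have dropped the pages in between:
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	...
 *	madv.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		... contents were purged, reupload ...
 */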

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
{
	int i;

	INIT_LIST_HEAD(&obj->global_list);
	for (i = 0; i < I915_NUM_ENGINES; i++)
		init_request_active(&obj->last_read[i],
				    i915_gem_object_retire__read);
	init_request_active(&obj->last_write,
			    i915_gem_object_retire__write);
	INIT_LIST_HEAD(&obj->obj_exec_link);
	INIT_LIST_HEAD(&obj->vma_list);
	INIT_LIST_HEAD(&obj->batch_pool_link);

	obj->ops = ops;

	obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
	obj->madv = I915_MADV_WILLNEED;

	i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}

static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,
};

struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
						  size_t size)
{
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	gfp_t mask;
	int ret;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &obj->base, size);
	if (ret)
		goto fail;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);

	i915_gem_object_init(obj, &i915_gem_object_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(dev)) {
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached.  Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache.  This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		obj->cache_level = I915_CACHE_LLC;
	} else
		obj->cache_level = I915_CACHE_NONE;

	trace_i915_gem_object_create(obj);

	return obj;

fail:
	i915_gem_object_free(obj);

	return ERR_PTR(ret);
}
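
/*
 * Illustrative use (a sketch): note the ERR_PTR convention - there is
 * no NULL return to check:
 *
 *	obj = i915_gem_object_create(dev, 4096);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */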

static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
	/* If we are the last user of the backing storage (be it shmemfs
	 * pages or stolen etc), we know that the pages are going to be
	 * immediately released. In this case, we can then skip copying
	 * back the contents from the GPU.
	 */

	if (obj->madv != I915_MADV_WILLNEED)
		return false;

	if (obj->base.filp == NULL)
		return true;

	/* At first glance, this looks racy, but then again so would be
	 * userspace racing mmap against close. However, the first external
	 * reference to the filp can only be obtained through the
	 * i915_gem_mmap_ioctl() which safeguards us against the user
	 * acquiring such a reference whilst we are in the middle of
	 * freeing the object.
	 */
	return atomic_long_read(&obj->base.filp->f_count) == 1;
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_vma *vma, *next;

	intel_runtime_pm_get(dev_priv);

	trace_i915_gem_object_destroy(obj);

	/* All file-owned VMA should have been released by this point through
	 * i915_gem_close_object(), or earlier by i915_gem_context_close().
	 * However, the object may also be bound into the global GTT (e.g.
	 * older GPUs without per-process support, or for direct access through
	 * the GTT either for the user or for scanout). Those VMA still need to
	 * be unbound now.
	 */
	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
		GEM_BUG_ON(!i915_vma_is_ggtt(vma));
		GEM_BUG_ON(i915_vma_is_active(vma));
		vma->flags &= ~I915_VMA_PIN_MASK;
		i915_vma_close(vma);
	}
	GEM_BUG_ON(obj->bind_count);

	/* Stolen objects don't hold a ref, but do hold a pin count. Fix that
	 * up before progressing. */
	if (obj->stolen)
		i915_gem_object_unpin_pages(obj);

	WARN_ON(atomic_read(&obj->frontbuffer_bits));

	if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
	    i915_gem_object_is_tiled(obj))
		i915_gem_object_unpin_pages(obj);

	if (WARN_ON(obj->pages_pin_count))
		obj->pages_pin_count = 0;
	if (discard_backing_storage(obj))
		obj->madv = I915_MADV_DONTNEED;
	i915_gem_object_put_pages(obj);

	BUG_ON(obj->pages);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	if (obj->ops->release)
		obj->ops->release(obj);

	drm_gem_object_release(&obj->base);
	i915_gem_info_remove_obj(dev_priv, obj->base.size);

	kfree(obj->bit_17);
	i915_gem_object_free(obj);

	intel_runtime_pm_put(dev_priv);
}

int i915_gem_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	intel_suspend_gt_powersave(dev_priv);

	mutex_lock(&dev->struct_mutex);

	/* We have to flush all the executing contexts to main memory so
	 * that they can be saved in the hibernation image. To ensure the
	 * last context image is coherent, we have to switch away from it.
	 * That leaves the dev_priv->kernel_context still active when
	 * we actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	ret = i915_gem_switch_to_kernel_context(dev_priv);
	if (ret)
		goto err;

	ret = i915_gem_wait_for_idle(dev_priv,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
	if (ret)
		goto err;

	i915_gem_retire_requests(dev_priv);

	i915_gem_context_lost(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	cancel_delayed_work_sync(&dev_priv->gt.retire_work);
	flush_delayed_work(&dev_priv->gt.idle_work);

	/* Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
	WARN_ON(dev_priv->gt.awake);

	return 0;

err:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

void i915_gem_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	mutex_lock(&dev->struct_mutex);
	i915_gem_restore_gtt_mappings(dev);

	/* As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
	dev_priv->gt.resume(dev_priv);

	mutex_unlock(&dev->struct_mutex);
}

void i915_gem_init_swizzling(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (INTEL_INFO(dev)->gen < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN5(dev))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN6(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN7(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else if (IS_GEN8(dev))
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
	else
		BUG();
}

static void init_unused_ring(struct drm_device *dev, u32 base)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(RING_CTL(base), 0);
	I915_WRITE(RING_HEAD(base), 0);
	I915_WRITE(RING_TAIL(base), 0);
	I915_WRITE(RING_START(base), 0);
}

static void init_unused_rings(struct drm_device *dev)
{
	if (IS_I830(dev)) {
		init_unused_ring(dev, PRB1_BASE);
		init_unused_ring(dev, SRB0_BASE);
		init_unused_ring(dev, SRB1_BASE);
		init_unused_ring(dev, SRB2_BASE);
		init_unused_ring(dev, SRB3_BASE);
	} else if (IS_GEN2(dev)) {
		init_unused_ring(dev, SRB0_BASE);
		init_unused_ring(dev, SRB1_BASE);
	} else if (IS_GEN3(dev)) {
		init_unused_ring(dev, PRB1_BASE);
		init_unused_ring(dev, PRB2_BASE);
	}
}

int
i915_gem_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;
	int ret;

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (IS_HASWELL(dev))
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	if (HAS_PCH_NOP(dev)) {
		if (IS_IVYBRIDGE(dev)) {
			u32 temp = I915_READ(GEN7_MSG_CTL);
			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
			I915_WRITE(GEN7_MSG_CTL, temp);
		} else if (INTEL_INFO(dev)->gen >= 7) {
			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
		}
	}

	i915_gem_init_swizzling(dev);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (i.e. head != tail) after resume which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(dev);

	BUG_ON(!dev_priv->kernel_context);

	ret = i915_ppgtt_init_hw(dev);
	if (ret) {
		DRM_ERROR("PPGTT enable HW failed %d\n", ret);
		goto out;
	}

	/* Need to do basic initialisation of all rings first: */
	for_each_engine(engine, dev_priv) {
		ret = engine->init_hw(engine);
		if (ret)
			goto out;
	}

	intel_mocs_init_l3cc_table(dev);

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_guc_setup(dev);
	if (ret)
		goto out;

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	return ret;
}

bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
{
	if (INTEL_INFO(dev_priv)->gen < 6)
		return false;

	/* TODO: make semaphores and Execlists play nicely together */
	if (i915.enable_execlists)
		return false;

	if (value >= 0)
		return value;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}
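
/*
 * Illustrative call (a sketch): the caller feeds in the raw module
 * parameter, where a negative value requests auto-detection, e.g.
 *
 *	dev_priv->semaphores =
 *		intel_sanitize_semaphores(dev_priv, i915.semaphores);
 */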

int i915_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	mutex_lock(&dev->struct_mutex);

	if (!i915.enable_execlists) {
		dev_priv->gt.resume = intel_legacy_submission_resume;
		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
	} else {
		dev_priv->gt.resume = intel_lr_context_resume;
		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
	}

	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	i915_gem_init_userptr(dev_priv);

	ret = i915_gem_init_ggtt(dev_priv);
	if (ret)
		goto out_unlock;

	ret = i915_gem_context_init(dev);
	if (ret)
		goto out_unlock;

	ret = intel_engines_init(dev);
	if (ret)
		goto out_unlock;

	ret = i915_gem_init_hw(dev);
	if (ret == -EIO) {
		/* Allow engine initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this where the GPU is angry,
		 * for all other failures, such as an allocation failure, bail.
		 */
		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
		i915_gem_set_wedged(dev_priv);
		ret = 0;
	}

out_unlock:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

void
i915_gem_cleanup_engines(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		dev_priv->gt.cleanup_engine(engine);
}

static void
init_engine_lists(struct intel_engine_cs *engine)
{
	INIT_LIST_HEAD(&engine->request_list);
}

void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int i;

	if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
		 IS_I945GM(dev_priv) || IS_G33(dev_priv))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	if (intel_vgpu_active(dev_priv))
		dev_priv->num_fence_regs =
				I915_READ(vgtif_reg(avail_rs.fence_num));

	/* Initialize fence registers to zero */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];

		fence->i915 = dev_priv;
		fence->id = i;
		list_add_tail(&fence->link, &dev_priv->mm.fence_list);
	}
	i915_gem_restore_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
}

void
i915_gem_load_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int i;

	dev_priv->objects =
		kmem_cache_create("i915_gem_object",
				  sizeof(struct drm_i915_gem_object), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	dev_priv->vmas =
		kmem_cache_create("i915_gem_vma",
				  sizeof(struct i915_vma), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	dev_priv->requests =
		kmem_cache_create("i915_gem_request",
				  sizeof(struct drm_i915_gem_request), 0,
				  SLAB_HWCACHE_ALIGN |
				  SLAB_RECLAIM_ACCOUNT |
				  SLAB_DESTROY_BY_RCU,
				  NULL);

	INIT_LIST_HEAD(&dev_priv->context_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	for (i = 0; i < I915_NUM_ENGINES; i++)
		init_engine_lists(&dev_priv->engine[i]);
	INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
			  i915_gem_retire_work_handler);
	INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
			  i915_gem_idle_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);

	spin_lock_init(&dev_priv->fb_tracking.lock);
}

void i915_gem_load_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	kmem_cache_destroy(dev_priv->requests);
	kmem_cache_destroy(dev_priv->vmas);
	kmem_cache_destroy(dev_priv->objects);

	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
	rcu_barrier();
}

int i915_gem_freeze(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_gem_shrink_all(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_runtime_pm_put(dev_priv);

	return 0;
}

int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct list_head *phases[] = {
		&dev_priv->mm.unbound_list,
		&dev_priv->mm.bound_list,
		NULL
	}, **p;

	/* Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
	 *
	 * To try and reduce the hibernation image, we manually shrink
	 * the objects as well.
	 */

	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);

	for (p = phases; *p; p++) {
		list_for_each_entry(obj, *p, global_list) {
			obj->base.read_domains = I915_GEM_DOMAIN_CPU;
			obj->base.write_domain = I915_GEM_DOMAIN_CPU;
		}
	}
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_request *request;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list)
		request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);

	if (!list_empty(&file_priv->rps.link)) {
		spin_lock(&to_i915(dev)->rps.client_lock);
		list_del(&file_priv->rps.link);
		spin_unlock(&to_i915(dev)->rps.client_lock);
	}
}

int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = to_i915(dev);
	file_priv->file = file;
	INIT_LIST_HEAD(&file_priv->rps.link);

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	file_priv->bsd_engine = -1;

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	/* Control of individual bits within the mask is guarded by
	 * the owning plane->mutex, i.e. we can never see concurrent
	 * manipulation of individual bits. But since the bitfield as a whole
	 * is updated using RMW, we need to use atomics in order to update
	 * the bits.
	 */
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
		     sizeof(atomic_t) * BITS_PER_BYTE);

	if (old) {
		WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
		atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
	}

	if (new) {
		WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
		atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
	}
}
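
/*
 * Illustrative call (a sketch): flipping a pipe's primary plane from
 * one buffer to another transfers that plane's bit, e.g.
 *
 *	i915_gem_track_fb(old_fb_obj, new_fb_obj,
 *			  INTEL_FRONTBUFFER_PRIMARY(pipe));
 */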

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
{
	struct page *page;

	/* Only default objects have per-page dirty tracking */
	if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
		return NULL;

	page = i915_gem_object_get_page(obj, n);
	set_page_dirty(page);
	return page;
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
			         const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct sg_table *sg;
	size_t bytes;
	int ret;

	obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		goto fail;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto fail;

	i915_gem_object_pin_pages(obj);
	sg = obj->pages;
	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
	obj->dirty = 1;		/* Backing store is now out of date */
	i915_gem_object_unpin_pages(obj);

	if (WARN_ON(bytes != size)) {
		DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
		ret = -EFAULT;
		goto fail;
	}

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(ret);
}
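
/*
 * Illustrative use (a sketch): this helper suits one-shot uploads such
 * as firmware blobs, e.g.
 *
 *	obj = i915_gem_object_create_from_data(dev, blob, blob_size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */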