/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_mocs.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static void
i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
static void
i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);

static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

	if (!i915_reset_in_progress(error))
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_in_progress(error),
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	} else {
		return 0;
	}
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (vma->pin_count)
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (vma->pin_count)
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = ggtt->base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

static int
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	char *vaddr = obj->phys_handle->vaddr;
	struct sg_table *st;
	struct scatterlist *sg;
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(obj->base.dev);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = obj->phys_handle->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->pages = st;
	return 0;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
{
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (WARN_ON(ret)) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	if (obj->dirty) {
		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->dirty = 0;
	}

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

static int
drop_pages(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma, *next;
	int ret;

	drm_gem_object_reference(&obj->base);
	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
		if (i915_vma_unbind(vma))
			break;

	ret = i915_gem_object_put_pages(obj);
	drm_gem_object_unreference(&obj->base);

	return ret;
}

int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
			    int align)
{
	drm_dma_handle_t *phys;
	int ret;

	if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align -1))
			return -EBUSY;

		return 0;
	}

	if (obj->madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (obj->base.filp == NULL)
		return -EINVAL;

	ret = drop_pages(obj);
	if (ret)
		return ret;

	/* create a new object */
	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
	if (!phys)
		return -ENOMEM;

	obj->phys_handle = phys;
	obj->ops = &i915_gem_phys_ops;

	return i915_gem_object_get_pages(obj);
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = to_user_ptr(args->data_ptr);
	int ret = 0;

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten) {
			ret = -EFAULT;
			goto out;
		}
	}

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(dev);

out:
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->objects, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
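/*
 * Copy helpers for objects whose pages use bit-17 physical-address
 * swizzling: on affected pages the two 64-byte halves of every 128-byte
 * block are swapped, so each cacheline-sized chunk is accessed at
 * gpu_offset ^ 64 while copying to or from userspace.
 */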

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;

	if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
		return -EINVAL;

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	return ret;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}

}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source.  */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */
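/* A note on the fast path below: the destination is an atomic
 * write-combining mapping of the aperture, so the copy runs with pagefaults
 * disabled; if the source user page is not resident the copy returns
 * non-zero and the ioctl falls back to the slower pwrite path.
 */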

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

	intel_fb_obj_invalidate(obj, ORIGIN_GTT);

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(ggtt->mappable, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_flush;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_flush:
	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
out_unpin:
	i915_gem_object_ggtt_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
					user_data, page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire page. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				needs_clflush_after = true;
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(dev);
	else
		obj->cache_dirty = true;

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto put_rpm;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
put_rpm:
	intel_runtime_pm_put(dev_priv);

	return ret;
}

static int
i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
{
	if (__i915_terminally_wedged(reset_counter))
		return -EIO;

	if (__i915_reset_in_progress(reset_counter)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

static void fake_irq(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}

static bool missed_irq(struct drm_i915_private *dev_priv,
		       struct intel_engine_cs *engine)
{
	return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings);
}

static unsigned long local_clock_us(unsigned *cpu)
{
	unsigned long t;

	/* Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined wrt to the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we can detect the CPU
	 * switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
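	/* local_clock() is in ns; >> 10 divides by 1024 rather than 1000, so
	 * the value is a few percent short of true microseconds, which is
	 * close enough for the few-microsecond busywait budget used by
	 * __i915_spin_request().
	 */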
	t = local_clock() >> 10;
	put_cpu();

	return t;
}

static bool busywait_stop(unsigned long timeout, unsigned cpu)
{
	unsigned this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}

static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
{
	unsigned long timeout;
	unsigned cpu;

	/* When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quick as possible. However,
	 * if it is a slow request, we want to sleep as quickly as possible.
	 * The tradeoff between waiting and sleeping is roughly the time it
	 * takes to sleep on a request, on the order of a microsecond.
	 */

	if (req->engine->irq_refcount)
		return -EBUSY;

	/* Only spin if we know the GPU is processing this request */
	if (!i915_gem_request_started(req, true))
		return -EAGAIN;

	timeout = local_clock_us(&cpu) + 5;
	while (!need_resched()) {
		if (i915_gem_request_completed(req, true))
			return 0;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout, cpu))
			break;

		cpu_relax_lowlatency();
	}

	if (i915_gem_request_completed(req, false))
		return 0;

	return -EAGAIN;
}

/**
 * __i915_wait_request - wait until execution of request has finished
 * @req: duh!
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the request was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
int __i915_wait_request(struct drm_i915_gem_request *req,
			bool interruptible,
			s64 *timeout,
			struct intel_rps_client *rps)
{
	struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
	struct drm_device *dev = engine->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const bool irq_test_in_progress =
		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	DEFINE_WAIT(wait);
	unsigned long timeout_expire;
	s64 before = 0; /* Only to silence a compiler warning. */
	int ret;

	WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");

	if (list_empty(&req->list))
		return 0;

	if (i915_gem_request_completed(req, true))
		return 0;

	timeout_expire = 0;
	if (timeout) {
		if (WARN_ON(*timeout < 0))
			return -EINVAL;

		if (*timeout == 0)
			return -ETIME;

		timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);

		/*
		 * Record current time in case interrupted by signal, or wedged.
		 */
		before = ktime_get_raw_ns();
	}

	if (INTEL_INFO(dev_priv)->gen >= 6)
		gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);

	trace_i915_gem_request_wait_begin(req);

	/* Optimistic spin for the next jiffie before touching IRQs */
	ret = __i915_spin_request(req, state);
	if (ret == 0)
		goto out;

	if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) {
		ret = -ENODEV;
		goto out;
	}

	for (;;) {
		struct timer_list timer;

		prepare_to_wait(&engine->irq_queue, &wait, state);

		/* We need to check whether any gpu reset happened in between
		 * the request being submitted and now. If a reset has occurred,
		 * the request is effectively complete (we either are in the
		 * process of or have discarded the rendering and completely
		 * reset the GPU. The results of the request are lost and we
		 * are free to continue on with the original operation.
		 */
		if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
			ret = 0;
			break;
		}

		if (i915_gem_request_completed(req, false)) {
			ret = 0;
			break;
		}

		if (signal_pending_state(state, current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (timeout && time_after_eq(jiffies, timeout_expire)) {
			ret = -ETIME;
			break;
		}

		timer.function = NULL;
		if (timeout || missed_irq(dev_priv, engine)) {
			unsigned long expire;

			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
			expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire;
			mod_timer(&timer, expire);
		}

		io_schedule();

		if (timer.function) {
			del_singleshot_timer_sync(&timer);
			destroy_timer_on_stack(&timer);
		}
	}
	if (!irq_test_in_progress)
		engine->irq_put(engine);

	finish_wait(&engine->irq_queue, &wait);

out:
	trace_i915_gem_request_wait_end(req);

	if (timeout) {
		s64 tres = *timeout - (ktime_get_raw_ns() - before);

		*timeout = tres < 0 ? 0 : tres;

		/*
		 * Apparently ktime isn't accurate enough and occasionally has a
		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
		 * things up to make the test happy. We allow up to 1 jiffy.
		 *
		 * This is a regression from the timespec->ktime conversion.
		 */
		if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
			*timeout = 0;
	}

	return ret;
}

int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
				   struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;

	WARN_ON(!req || !file || req->file_priv);

	if (!req || !file)
		return -EINVAL;

	if (req->file_priv)
		return -EINVAL;

	file_priv = file->driver_priv;

	spin_lock(&file_priv->mm.lock);
	req->file_priv = file_priv;
	list_add_tail(&req->client_list, &file_priv->mm.request_list);
	spin_unlock(&file_priv->mm.lock);

	req->pid = get_pid(task_pid(current));

	return 0;
}

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	list_del(&request->client_list);
	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);

	put_pid(request->pid);
	request->pid = NULL;
}

static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
	trace_i915_gem_request_retire(request);

	/* We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	request->ringbuf->last_retired_head = request->postfix;

	list_del_init(&request->list);
	i915_gem_request_remove_from_client(request);

	i915_gem_request_unreference(request);
}

static void
__i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_gem_request *tmp;

	lockdep_assert_held(&engine->dev->struct_mutex);

	if (list_empty(&req->list))
		return;

	do {
		tmp = list_first_entry(&engine->request_list,
				       typeof(*tmp), list);

		i915_gem_request_retire(tmp);
	} while (tmp != req);

	WARN_ON(i915_verify_lists(engine->dev));
}

/**
 * Waits for a request to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_request(struct drm_i915_gem_request *req)
{
	struct drm_i915_private *dev_priv = req->i915;
	bool interruptible;
	int ret;

	interruptible = dev_priv->mm.interruptible;

	BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

	ret = __i915_wait_request(req, interruptible, NULL, NULL);
	if (ret)
		return ret;

	__i915_gem_request_retire__upto(req);
	return 0;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	int ret, i;

	if (!obj->active)
		return 0;

	if (readonly) {
		if (obj->last_write_req != NULL) {
			ret = i915_wait_request(obj->last_write_req);
			if (ret)
				return ret;

			i = obj->last_write_req->engine->id;
			if (obj->last_read_req[i] == obj->last_write_req)
				i915_gem_object_retire__read(obj, i);
			else
				i915_gem_object_retire__write(obj);
		}
	} else {
		for (i = 0; i < I915_NUM_ENGINES; i++) {
			if (obj->last_read_req[i] == NULL)
				continue;

			ret = i915_wait_request(obj->last_read_req[i]);
			if (ret)
				return ret;

			i915_gem_object_retire__read(obj, i);
		}
		GEM_BUG_ON(obj->active);
	}

	return 0;
}

static void
i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
			       struct drm_i915_gem_request *req)
{
	int ring = req->engine->id;

	if (obj->last_read_req[ring] == req)
		i915_gem_object_retire__read(obj, ring);
	else if (obj->last_write_req == req)
		i915_gem_object_retire__write(obj);

	__i915_gem_request_retire__upto(req);
}

/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
					    struct intel_rps_client *rps,
					    bool readonly)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
	int ret, i, n = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!dev_priv->mm.interruptible);

	if (!obj->active)
		return 0;

	if (readonly) {
		struct drm_i915_gem_request *req;

		req = obj->last_write_req;
		if (req == NULL)
			return 0;

		requests[n++] = i915_gem_request_reference(req);
	} else {
		for (i = 0; i < I915_NUM_ENGINES; i++) {
			struct drm_i915_gem_request *req;

			req = obj->last_read_req[i];
			if (req == NULL)
				continue;

			requests[n++] = i915_gem_request_reference(req);
		}
	}

	mutex_unlock(&dev->struct_mutex);
	ret = 0;
	for (i = 0; ret == 0 && i < n; i++)
		ret = __i915_wait_request(requests[i], true, NULL, rps);
	mutex_lock(&dev->struct_mutex);

	for (i = 0; i < n; i++) {
		if (ret == 0)
			i915_gem_object_retire_request(obj, requests[i]);
		i915_gem_request_unreference(requests[i]);
	}

	return ret;
}

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;
	return &fpriv->rps;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj,
							  to_rps_client(file),
							  !write_domain);
	if (ret)
		goto unref;

	if (read_domains & I915_GEM_DOMAIN_GTT)
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
	else
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);

	if (write_domain != 0)
		intel_fb_obj_invalidate(obj,
					write_domain == I915_GEM_DOMAIN_GTT ?
					ORIGIN_GTT : ORIGIN_CPU);

unref:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_display)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !cpu_has_pat)
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->filp) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		down_write(&mm->mmap_sem);
		vma = find_vma(mm, addr);
		if (vma)
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		up_write(&mm->mmap_sem);
	}
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_ggtt_view view = i915_ggtt_view_normal;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	intel_runtime_pm_get(dev_priv);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Try to flush the object off the GPU first without holding the lock.
	 * Upon reacquiring the lock, we will perform our sanity checks and then
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
	if (ret)
		goto unlock;

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
		ret = -EFAULT;
		goto unlock;
	}

	/* Use a partial view if the object is bigger than the aperture. */
	if (obj->base.size >= ggtt->mappable_end &&
	    obj->tiling_mode == I915_TILING_NONE) {
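		/* chunk_size is in pages; 256 * 4 KiB pages = 1 MiB per partial view. */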
		static const unsigned int chunk_size = 256; // 1 MiB

		memset(&view, 0, sizeof(view));
		view.type = I915_GGTT_VIEW_PARTIAL;
		view.params.partial.offset = rounddown(page_offset, chunk_size);
		view.params.partial.size =
			min_t(unsigned int,
			      chunk_size,
			      (vma->vm_end - vma->vm_start)/PAGE_SIZE -
			      view.params.partial.offset);
	}

	/* Now pin it into the GTT if needed */
	ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE);
	if (ret)
		goto unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto unpin;

	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto unpin;

	/* Finally, remap it using the new GTT offset */
	pfn = ggtt->mappable_base +
		i915_gem_obj_ggtt_offset_view(obj, &view);
	pfn >>= PAGE_SHIFT;

	if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
		/* Overriding existing pages in partial view does not cause
		 * us any trouble as TLBs are still valid because the fault
		 * is due to userspace losing part of the mapping or never
		 * having accessed it before (at this partials' range).
		 */
		unsigned long base = vma->vm_start +
				     (view.params.partial.offset << PAGE_SHIFT);
		unsigned int i;

		for (i = 0; i < view.params.partial.size; i++) {
			ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
			if (ret)
				break;
		}

		obj->fault_mappable = true;
	} else {
		if (!obj->fault_mappable) {
			unsigned long size = min_t(unsigned long,
						   vma->vm_end - vma->vm_start,
						   obj->base.size);
			int i;

			for (i = 0; i < size >> PAGE_SHIFT; i++) {
				ret = vm_insert_pfn(vma,
						    (unsigned long)vma->vm_start + i * PAGE_SIZE,
						    pfn + i);
				if (ret)
					break;
			}

			obj->fault_mappable = true;
		} else
			ret = vm_insert_pfn(vma,
					    (unsigned long)vmf->virtual_address,
					    pfn + page_offset);
	}
1868
unpin:
1869
	i915_gem_object_ggtt_unpin_view(obj, &view);
1870
unlock:
1871
	mutex_unlock(&dev->struct_mutex);
1872
out:
1873
	switch (ret) {
1874
	case -EIO:
1875 1876 1877 1878 1879 1880 1881
		/*
		 * We eat errors when the gpu is terminally wedged to avoid
		 * userspace unduly crashing (gl has no provisions for mmaps to
		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
		 * and so needs to be reported.
		 */
		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
1882 1883 1884
			ret = VM_FAULT_SIGBUS;
			break;
		}
1885
	case -EAGAIN:
D
Daniel Vetter 已提交
1886 1887 1888 1889
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
1890
		 */
1891 1892
	case 0:
	case -ERESTARTSYS:
1893
	case -EINTR:
1894 1895 1896 1897 1898
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
1899 1900
		ret = VM_FAULT_NOPAGE;
		break;
1901
	case -ENOMEM:
1902 1903
		ret = VM_FAULT_OOM;
		break;
1904
	case -ENOSPC:
1905
	case -EFAULT:
1906 1907
		ret = VM_FAULT_SIGBUS;
		break;
1908
	default:
1909
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1910 1911
		ret = VM_FAULT_SIGBUS;
		break;
1912
	}
1913 1914 1915

	intel_runtime_pm_put(dev_priv);
	return ret;
1916 1917
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	/* Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 */
	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (!obj->fault_mappable)
		return;

	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);

	/* Ensure that the CPU's PTE are revoked and there are not outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied by changing the PTE above *should* be
	 * sufficient, an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */
	wmb();

	obj->fault_mappable = false;
}

void
i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		i915_gem_release_mmap(obj);
}

uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
	uint32_t gtt_size;

	if (INTEL_INFO(dev)->gen >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (INTEL_INFO(dev)->gen == 3)
		gtt_size = 1024*1024;
	else
		gtt_size = 512*1024;

	while (gtt_size < size)
		gtt_size <<= 1;
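
	/*
	 * Worked example (hypothetical size): a 384 KiB tiled object needs a
	 * 512 KiB fence region on gen2 but a 1 MiB region on gen3, since the
	 * starting size above is simply doubled until it covers the object.
	 */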
	return gtt_size;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @dev: drm device
 * @size: object size
 * @tiling_mode: tiling mode
 * @fenced: is fenced alignment required or not
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
			   int tiling_mode, bool fenced)
{
	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
}

static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int ret;

	if (drm_vma_node_has_offset(&obj->base.vma_node))
		return 0;

	dev_priv->mm.shrinker_no_lock_stealing = true;

	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	/* Badly fragmented mmap space? The only way we can recover
	 * space is by destroying unwanted objects. We can't randomly release
	 * mmap_offsets as userspace expects them to be persistent for the
	 * lifetime of the objects. The closest we can do is to release the
	 * offsets on purgeable objects by truncating them and marking them
	 * purged, which prevents userspace from ever using those objects again.
	 */
	i915_gem_shrink(dev_priv,
			obj->base.size >> PAGE_SHIFT,
			I915_SHRINK_BOUND |
			I915_SHRINK_UNBOUND |
			I915_SHRINK_PURGEABLE);
	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	i915_gem_shrink_all(dev_priv);
	ret = drm_gem_create_mmap_offset(&obj->base);
out:
	dev_priv->mm.shrinker_no_lock_stealing = false;

	return ret;
}

static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
}

int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
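
/*
 * Illustrative userspace flow for the fake-offset scheme above (a sketch,
 * not part of this file; error handling omitted):
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = bo_handle };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 *
 * Subsequent faults on ptr are then serviced by i915_gem_fault().
 */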

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.filp == NULL)
		return;

	/* Our goal here is to return as much of the memory as
	 * possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);

	obj->madv = __I915_MADV_PURGED;
}

/* Try to discard unwanted pages */
static void
i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;

	switch (obj->madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
		/* fall through */
	case __I915_MADV_PURGED:
		return;
	}

	if (obj->base.filp == NULL)
		return;

	mapping = file_inode(obj->base.filp)->i_mapping;
	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}

static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct sg_page_iter sg_iter;
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (WARN_ON(ret)) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		i915_gem_clflush_object(obj, true);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	i915_gem_gtt_finish_object(obj);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (obj->dirty)
			set_page_dirty(page);

		if (obj->madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		put_page(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

int
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	const struct drm_i915_gem_object_ops *ops = obj->ops;

	if (obj->pages == NULL)
		return 0;

	if (obj->pages_pin_count)
		return -EBUSY;

	BUG_ON(i915_gem_obj_bound_any(obj));

	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early. */
	list_del(&obj->global_list);

	if (obj->mapping) {
		if (is_vmalloc_addr(obj->mapping))
			vunmap(obj->mapping);
		else
			kunmap(kmap_to_page(obj->mapping));
		obj->mapping = NULL;
	}

	ops->put_pages(obj);
	obj->pages = NULL;

	i915_gem_object_invalidate(obj);

	return 0;
}

static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int page_count, i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sg_page_iter sg_iter;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	int ret;
	gfp_t gfp;

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	page_count = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping = file_inode(obj->base.filp)->i_mapping;
	gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
	gfp |= __GFP_NORETRY | __GFP_NOWARN;
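
	/*
	 * Summary of the allocation strategy below (descriptive only): each
	 * page is first requested with the cheap, silent gfp mask above; on
	 * failure we reap our own purgeable/bound/unbound objects and retry;
	 * only as a last resort do we shrink everything and fall back to a
	 * normal blocking allocation that may invoke the OOM killer.
	 */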
	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		if (IS_ERR(page)) {
			i915_gem_shrink(dev_priv,
					page_count,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND |
					I915_SHRINK_PURGEABLE);
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		}
		if (IS_ERR(page)) {
			/* We've tried hard to allocate the memory by reaping
			 * our own buffers, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 */
			i915_gem_shrink_all(dev_priv);
			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto err_pages;
			}
		}
#ifdef CONFIG_SWIOTLB
		if (swiotlb_nr_tbl()) {
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
			sg = sg_next(sg);
			continue;
		}
#endif
		if (!i || page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
	}
#ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
#endif
		sg_mark_end(sg);
	obj->pages = st;

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret)
		goto err_pages;

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj);

	if (obj->tiling_mode != I915_TILING_NONE &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		i915_gem_object_pin_pages(obj);

	return 0;

err_pages:
	sg_mark_end(sg);
	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
		put_page(sg_page_iter_page(&sg_iter));
	sg_free_table(st);
	kfree(st);

	/* shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC if there is insufficient memory, along with the
	 * usual ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_get_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_put_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
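/*
 * Illustrative calling pattern (sketch only):
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret == 0) {
 *		i915_gem_object_pin_pages(obj);
 *		... access obj->pages ...
 *		i915_gem_object_unpin_pages(obj);
 *	}
 *
 * The pin count taken by the caller is what stops i915_gem_object_put_pages()
 * from releasing the backing store while it is still in use.
 */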
int
i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	const struct drm_i915_gem_object_ops *ops = obj->ops;
	int ret;

	if (obj->pages)
		return 0;

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	BUG_ON(obj->pages_pin_count);

	ret = ops->get_pages(obj);
	if (ret)
		return ret;

	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);

	obj->get_page.sg = obj->pages->sgl;
	obj->get_page.last = 0;

	return 0;
}

void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ERR_PTR(ret);

	i915_gem_object_pin_pages(obj);

	if (obj->mapping == NULL) {
		struct page **pages;

		pages = NULL;
		if (obj->base.size == PAGE_SIZE)
			obj->mapping = kmap(sg_page(obj->pages->sgl));
		else
			pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT,
					       sizeof(*pages),
					       GFP_TEMPORARY);
		if (pages != NULL) {
			struct sg_page_iter sg_iter;
			int n;

			n = 0;
			for_each_sg_page(obj->pages->sgl, &sg_iter,
					 obj->pages->nents, 0)
				pages[n++] = sg_page_iter_page(&sg_iter);

			obj->mapping = vmap(pages, n, 0, PAGE_KERNEL);
			drm_free_large(pages);
		}
		if (obj->mapping == NULL) {
			i915_gem_object_unpin_pages(obj);
			return ERR_PTR(-ENOMEM);
		}
	}

	return obj->mapping;
}
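
/*
 * Note on the cache above: single-page objects are mapped with kmap() rather
 * than vmap() to avoid consuming vmalloc space, and the resulting pointer is
 * cached in obj->mapping. The mapping is only torn down (via kunmap() or
 * vunmap()) when the backing pages are finally released in
 * i915_gem_object_put_pages().
 */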

void i915_vma_move_to_active(struct i915_vma *vma,
			     struct drm_i915_gem_request *req)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct intel_engine_cs *engine;

	engine = i915_gem_request_get_engine(req);

	/* Add a reference if we're newly entering the active list. */
	if (obj->active == 0)
		drm_gem_object_reference(&obj->base);
	obj->active |= intel_engine_flag(engine);

	list_move_tail(&obj->engine_list[engine->id], &engine->active_list);
	i915_gem_request_assign(&obj->last_read_req[engine->id], req);

	list_move_tail(&vma->vm_link, &vma->vm->active_list);
}

static void
i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(obj->last_write_req == NULL);
	GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));

	i915_gem_request_assign(&obj->last_write_req, NULL);
	intel_fb_obj_flush(obj, true, ORIGIN_CS);
}

static void
i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
{
	struct i915_vma *vma;

	GEM_BUG_ON(obj->last_read_req[ring] == NULL);
	GEM_BUG_ON(!(obj->active & (1 << ring)));

	list_del_init(&obj->engine_list[ring]);
	i915_gem_request_assign(&obj->last_read_req[ring], NULL);

	if (obj->last_write_req && obj->last_write_req->engine->id == ring)
		i915_gem_object_retire__write(obj);

	obj->active &= ~(1 << ring);
	if (obj->active)
		return;

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	list_move_tail(&obj->global_list,
		       &to_i915(obj->base.dev)->mm.bound_list);

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!list_empty(&vma->vm_link))
			list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	}

	i915_gem_request_assign(&obj->last_fenced_req, NULL);
	drm_gem_object_unreference(&obj->base);
}

static int
i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	int ret;

	/* Carefully retire all requests without writing to the rings */
	for_each_engine(engine, dev_priv) {
		ret = intel_engine_idle(engine);
		if (ret)
			return ret;
	}
	i915_gem_retire_requests(dev);

	/* Finally reset hw state */
	for_each_engine(engine, dev_priv)
		intel_ring_init_seqno(engine, seqno);

	return 0;
}

int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (seqno == 0)
		return -EINVAL;

	/* HWS page needs to be set less than what we
	 * will inject to ring
	 */
	ret = i915_gem_init_seqno(dev, seqno - 1);
	if (ret)
		return ret;

	/* Carefully set the last_seqno value so that wrap
	 * detection still works
	 */
	dev_priv->next_seqno = seqno;
	dev_priv->last_seqno = seqno - 1;
	if (dev_priv->last_seqno == 0)
		dev_priv->last_seqno--;

	return 0;
}
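
/*
 * Note: i915_gem_set_seqno() is typically driven from debugfs (the
 * i915_next_seqno file) so that tests can fast-forward the seqno close to the
 * 32-bit wrap point and exercise the wrap handling above.
 */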

int
i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* reserve 0 for non-seqno */
	if (dev_priv->next_seqno == 0) {
		int ret = i915_gem_init_seqno(dev, 0);
		if (ret)
			return ret;

		dev_priv->next_seqno = 1;
	}

	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
	return 0;
}

/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void __i915_add_request(struct drm_i915_gem_request *request,
			struct drm_i915_gem_object *obj,
			bool flush_caches)
{
	struct intel_engine_cs *engine;
	struct drm_i915_private *dev_priv;
	struct intel_ringbuffer *ringbuf;
	u32 request_start;
	u32 reserved_tail;
	int ret;

	if (WARN_ON(request == NULL))
		return;

	engine = request->engine;
	dev_priv = request->i915;
	ringbuf = request->ringbuf;

	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	request_start = intel_ring_get_tail(ringbuf);
	reserved_tail = request->reserved_space;
	request->reserved_space = 0;

	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	if (flush_caches) {
		if (i915.enable_execlists)
			ret = logical_ring_flush_all_caches(request);
		else
			ret = intel_ring_flush_all_caches(request);
		/* Not allowed to fail! */
		WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
	}

	trace_i915_gem_request_add(request);

	request->head = request_start;

	/* Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	request->batch_obj = obj;

	/* Seal the request and mark it as pending execution. Note that
	 * we may inspect this state, without holding any locks, during
	 * hangcheck. Hence we apply the barrier to ensure that we do not
	 * see a more recent value in the hws than we are tracking.
	 */
	request->emitted_jiffies = jiffies;
	request->previous_seqno = engine->last_submitted_seqno;
	smp_store_mb(engine->last_submitted_seqno, request->seqno);
	list_add_tail(&request->list, &engine->request_list);

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	request->postfix = intel_ring_get_tail(ringbuf);

	if (i915.enable_execlists)
		ret = engine->emit_request(request);
	else {
		ret = engine->add_request(request);

		request->tail = intel_ring_get_tail(ringbuf);
	}
	/* Not allowed to fail! */
	WARN(ret, "emit|add_request failed: %d!\n", ret);

	i915_queue_hangcheck(engine->dev);

	queue_delayed_work(dev_priv->wq,
			   &dev_priv->mm.retire_work,
			   round_jiffies_up_relative(HZ));
	intel_mark_busy(dev_priv->dev);

	/* Sanity check that the reserved size was large enough. */
	ret = intel_ring_get_tail(ringbuf) - request_start;
	if (ret < 0)
		ret += ringbuf->size;
	WARN_ONCE(ret > reserved_tail,
		  "Not enough space reserved (%d bytes) "
		  "for adding the request (%d bytes)\n",
		  reserved_tail, ret);
}

static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
				   const struct intel_context *ctx)
{
	unsigned long elapsed;

	elapsed = get_seconds() - ctx->hang_stats.guilty_ts;

	if (ctx->hang_stats.banned)
		return true;

	if (ctx->hang_stats.ban_period_seconds &&
	    elapsed <= ctx->hang_stats.ban_period_seconds) {
		if (!i915_gem_context_is_default(ctx)) {
			DRM_DEBUG("context hanging too fast, banning!\n");
			return true;
		} else if (i915_stop_ring_allow_ban(dev_priv)) {
			if (i915_stop_ring_allow_warn(dev_priv))
				DRM_ERROR("gpu hanging too fast, banning!\n");
			return true;
		}
	}

	return false;
}
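
/*
 * Example with hypothetical numbers: if a context's ban period is 6 seconds,
 * a second guilty hang within 6 seconds of the previous one marks the context
 * banned, after which its execbuffers are rejected.
 */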

static void i915_set_reset_status(struct drm_i915_private *dev_priv,
				  struct intel_context *ctx,
				  const bool guilty)
{
	struct i915_ctx_hang_stats *hs;

	if (WARN_ON(!ctx))
		return;

	hs = &ctx->hang_stats;

	if (guilty) {
		hs->banned = i915_context_is_banned(dev_priv, ctx);
		hs->batch_active++;
		hs->guilty_ts = get_seconds();
	} else {
		hs->batch_pending++;
	}
}

void i915_gem_request_free(struct kref *req_ref)
{
	struct drm_i915_gem_request *req = container_of(req_ref,
						 typeof(*req), ref);
	struct intel_context *ctx = req->ctx;

	if (req->file_priv)
		i915_gem_request_remove_from_client(req);

	if (ctx) {
		if (i915.enable_execlists)
			intel_lr_context_unpin(ctx, req->engine);

		i915_gem_context_unreference(ctx);
	}

	kmem_cache_free(req->i915->requests, req);
}

static inline int
__i915_gem_request_alloc(struct intel_engine_cs *engine,
			 struct intel_context *ctx,
			 struct drm_i915_gem_request **req_out)
{
	struct drm_i915_private *dev_priv = to_i915(engine->dev);
	unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
	struct drm_i915_gem_request *req;
	int ret;

	if (!req_out)
		return -EINVAL;

	*req_out = NULL;

	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
	 * and restart.
	 */
	ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
	if (ret)
		return ret;

	req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
	if (req == NULL)
		return -ENOMEM;

	ret = i915_gem_get_seqno(engine->dev, &req->seqno);
	if (ret)
		goto err;

	kref_init(&req->ref);
	req->i915 = dev_priv;
	req->engine = engine;
	req->reset_counter = reset_counter;
	req->ctx = ctx;
	i915_gem_context_reference(req->ctx);

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_add_request() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 */
	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;

	if (i915.enable_execlists)
		ret = intel_logical_ring_alloc_request_extras(req);
	else
		ret = intel_ring_alloc_request_extras(req);
	if (ret)
		goto err_ctx;

	*req_out = req;
	return 0;

err_ctx:
	i915_gem_context_unreference(ctx);
err:
	kmem_cache_free(dev_priv->requests, req);
	return ret;
}

/**
 * i915_gem_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *       This can be NULL if the request is not directly related to
 *       any specific user context, in which case this function will
 *       choose an appropriate context to use.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct intel_context *ctx)
{
	struct drm_i915_gem_request *req;
	int err;

	if (ctx == NULL)
		ctx = to_i915(engine->dev)->kernel_context;
	err = __i915_gem_request_alloc(engine, ctx, &req);
	return err ? ERR_PTR(err) : req;
}

struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request;

	list_for_each_entry(request, &engine->request_list, list) {
		if (i915_gem_request_completed(request, false))
			continue;

		return request;
	}

	return NULL;
}

static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv,
				       struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request;
	bool ring_hung;

	request = i915_gem_find_active_request(engine);

	if (request == NULL)
		return;

	ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;

	i915_set_reset_status(dev_priv, request->ctx, ring_hung);

	list_for_each_entry_continue(request, &engine->request_list, list)
		i915_set_reset_status(dev_priv, request->ctx, false);
}

static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
					struct intel_engine_cs *engine)
{
	struct intel_ringbuffer *buffer;

	while (!list_empty(&engine->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&engine->active_list,
				       struct drm_i915_gem_object,
				       engine_list[engine->id]);

		i915_gem_object_retire__read(obj, engine->id);
	}

	/*
	 * Clear the execlists queue up before freeing the requests, as those
	 * are the ones that keep the context and ringbuffer backing objects
	 * pinned in place.
	 */

	if (i915.enable_execlists) {
		/* Ensure irq handler finishes or is cancelled. */
		tasklet_kill(&engine->irq_tasklet);

		spin_lock_bh(&engine->execlist_lock);
		/* list_splice_tail_init checks for empty lists */
		list_splice_tail_init(&engine->execlist_queue,
				      &engine->execlist_retired_req_list);
		spin_unlock_bh(&engine->execlist_lock);

		intel_execlists_retire_requests(engine);
	}

	/*
	 * We must free the requests after all the corresponding objects have
	 * been moved off active lists. Which is the same order as the normal
	 * retire_requests function does. This is important if objects hold
	 * implicit references on things like e.g. ppgtt address spaces through
	 * the request.
	 */
	while (!list_empty(&engine->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&engine->request_list,
					   struct drm_i915_gem_request,
					   list);

		i915_gem_request_retire(request);
	}

	/* Having flushed all requests from all queues, we know that all
	 * ringbuffers must now be empty. However, since we do not reclaim
	 * all space when retiring the request (to prevent HEADs colliding
	 * with rapid ringbuffer wraparound) the amount of available space
	 * upon reset is less than when we start. Do one more pass over
	 * all the ringbuffers to reset last_retired_head.
	 */
	list_for_each_entry(buffer, &engine->buffers, link) {
		buffer->last_retired_head = buffer->tail;
		intel_ring_update_space(buffer);
	}

	intel_ring_init_seqno(engine, engine->last_submitted_seqno);
}

void i915_gem_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;

	/*
	 * Before we free the objects from the requests, we need to inspect
	 * them for finding the guilty party. As the requests only borrow
	 * their reference to the objects, the inspection must be done first.
	 */
	for_each_engine(engine, dev_priv)
		i915_gem_reset_engine_status(dev_priv, engine);

	for_each_engine(engine, dev_priv)
		i915_gem_reset_engine_cleanup(dev_priv, engine);

	i915_gem_context_reset(dev);

	i915_gem_restore_fences(dev);

	WARN_ON(i915_verify_lists(dev));
}

/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
{
	WARN_ON(i915_verify_lists(engine->dev));

	/* Retire requests first as we use it above for the early return.
	 * If we retire requests last, we may use a later seqno and so clear
	 * the requests lists without clearing the active list, leading to
	 * confusion.
	 */
	while (!list_empty(&engine->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&engine->request_list,
					   struct drm_i915_gem_request,
					   list);

		if (!i915_gem_request_completed(request, true))
			break;

		i915_gem_request_retire(request);
	}

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate,
	 * before we free the context associated with the requests.
	 */
	while (!list_empty(&engine->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&engine->active_list,
				       struct drm_i915_gem_object,
				       engine_list[engine->id]);

		if (!list_empty(&obj->last_read_req[engine->id]->list))
			break;

		i915_gem_object_retire__read(obj, engine->id);
	}

	if (unlikely(engine->trace_irq_req &&
		     i915_gem_request_completed(engine->trace_irq_req, true))) {
		engine->irq_put(engine);
		i915_gem_request_assign(&engine->trace_irq_req, NULL);
	}

	WARN_ON(i915_verify_lists(engine->dev));
}

bool
i915_gem_retire_requests(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	bool idle = true;

	for_each_engine(engine, dev_priv) {
		i915_gem_retire_requests_ring(engine);
		idle &= list_empty(&engine->request_list);
		if (i915.enable_execlists) {
			spin_lock_bh(&engine->execlist_lock);
			idle &= list_empty(&engine->execlist_queue);
			spin_unlock_bh(&engine->execlist_lock);

			intel_execlists_retire_requests(engine);
		}
	}

	if (idle)
		mod_delayed_work(dev_priv->wq,
				   &dev_priv->mm.idle_work,
				   msecs_to_jiffies(100));

	return idle;
}

static void
i915_gem_retire_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), mm.retire_work.work);
	struct drm_device *dev = dev_priv->dev;
	bool idle;

	/* Come back later if the device is busy... */
	idle = false;
	if (mutex_trylock(&dev->struct_mutex)) {
		idle = i915_gem_retire_requests(dev);
		mutex_unlock(&dev->struct_mutex);
	}
	if (!idle)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
				   round_jiffies_up_relative(HZ));
}

static void
i915_gem_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), mm.idle_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		if (!list_empty(&engine->request_list))
			return;

	/* we probably should sync with hangcheck here, using cancel_work_sync.
	 * Also locking seems to be fubar here, engine->request_list is protected
	 * by dev->struct_mutex. */

	intel_mark_idle(dev);

	if (mutex_trylock(&dev->struct_mutex)) {
		for_each_engine(engine, dev_priv)
			i915_gem_batch_pool_fini(&engine->batch_pool);

		mutex_unlock(&dev->struct_mutex);
	}
}

/**
 * Ensures that an object will eventually get non-busy by flushing any required
 * write domains, emitting any outstanding lazy request and retiring any
 * completed requests.
 */
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{
	int i;

	if (!obj->active)
		return 0;

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct drm_i915_gem_request *req;

		req = obj->last_read_req[i];
		if (req == NULL)
			continue;

		if (list_empty(&req->list))
			goto retire;

		if (i915_gem_request_completed(req, true)) {
			__i915_gem_request_retire__upto(req);
retire:
			i915_gem_object_retire__read(obj, i);
		}
	}

	return 0;
}

/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
 * @DRM_IOCTL_ARGS: standard ioctl arguments
 *
 * Returns 0 if successful, else an error is returned with the remaining time in
 * the timeout parameter.
 *  -ETIME: object is still busy after timeout
 *  -ERESTARTSYS: signal interrupted the wait
 *  -ENOENT: object doesn't exist
 * Also possible, but rare:
 *  -EAGAIN: GPU wedged
 *  -ENOMEM: damn
 *  -ENODEV: Internal IRQ fail
 *  -E?: The add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 * without holding struct_mutex the object may become re-busied before this
 * function completes. A similar but shorter race condition exists in the busy
 * ioctl.
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
	int i, n = 0;
	int ret;

	if (args->flags != 0)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
	if (&obj->base == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOENT;
	}

	/* Need to make sure the object gets inactive eventually. */
	ret = i915_gem_object_flush_active(obj);
	if (ret)
		goto out;

	if (!obj->active)
		goto out;

	/* Do this after OLR check to make sure we make forward progress polling
	 * on this IOCTL with a timeout == 0 (like busy ioctl)
	 */
	if (args->timeout_ns == 0) {
		ret = -ETIME;
		goto out;
	}

	drm_gem_object_unreference(&obj->base);

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		if (obj->last_read_req[i] == NULL)
			continue;

		req[n++] = i915_gem_request_reference(obj->last_read_req[i]);
	}

	mutex_unlock(&dev->struct_mutex);

	for (i = 0; i < n; i++) {
		if (ret == 0)
			ret = __i915_wait_request(req[i], true,
						  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
						  to_rps_client(file));
		i915_gem_request_unreference__unlocked(req[i]);
	}
	return ret;

out:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
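
/*
 * Userspace usage sketch for the wait ioctl above (illustrative only):
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 0,	// 0 polls, like the busy ioctl
 *	};
 *	ret = ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *
 * A negative timeout_ns waits indefinitely; if a positive timeout expires,
 * -ETIME is returned and the remaining time is written back to timeout_ns.
 */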

static int
__i915_gem_object_sync(struct drm_i915_gem_object *obj,
		       struct intel_engine_cs *to,
		       struct drm_i915_gem_request *from_req,
		       struct drm_i915_gem_request **to_req)
{
	struct intel_engine_cs *from;
	int ret;

	from = i915_gem_request_get_engine(from_req);
	if (to == from)
		return 0;

	if (i915_gem_request_completed(from_req, true))
		return 0;

	if (!i915_semaphore_is_enabled(obj->base.dev)) {
		struct drm_i915_private *i915 = to_i915(obj->base.dev);
		ret = __i915_wait_request(from_req,
					  i915->mm.interruptible,
					  NULL,
					  &i915->rps.semaphores);
		if (ret)
			return ret;

		i915_gem_object_retire_request(obj, from_req);
	} else {
		int idx = intel_ring_sync_index(from, to);
		u32 seqno = i915_gem_request_get_seqno(from_req);

		WARN_ON(!to_req);

		if (seqno <= from->semaphore.sync_seqno[idx])
			return 0;

		if (*to_req == NULL) {
			struct drm_i915_gem_request *req;

			req = i915_gem_request_alloc(to, NULL);
			if (IS_ERR(req))
				return PTR_ERR(req);

			*to_req = req;
		}

		trace_i915_gem_ring_sync_to(*to_req, from, from_req);
		ret = to->semaphore.sync_to(*to_req, from, seqno);
		if (ret)
			return ret;

		/* We use last_read_req because sync_to()
		 * might have just caused seqno wrap under
		 * the radar.
		 */
		from->semaphore.sync_seqno[idx] =
			i915_gem_request_get_seqno(obj->last_read_req[from->id]);
	}

	return 0;
}

/**
 * i915_gem_object_sync - sync an object to a ring.
 *
 * @obj: object which may be in use on another ring.
 * @to: ring we wish to use the object on. May be NULL.
 * @to_req: request we wish to use the object for. See below.
 *          This will be allocated and returned if a request is
 *          required but not passed in.
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Calling with NULL implies synchronizing the object with the CPU
 * rather than a particular GPU ring. Conceptually we serialise writes
 * between engines inside the GPU. We only allow one engine to write
 * into a buffer at any time, but multiple readers. To ensure each has
 * a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * For CPU synchronisation (NULL to) no request is required. For syncing with
 * rings to_req must be non-NULL. However, a request does not have to be
 * pre-allocated. If *to_req is NULL and sync commands will be emitted then a
 * request will be allocated automatically and returned through *to_req. Note
 * that it is not guaranteed that commands will be emitted (because the system
 * might already be idle). Hence there is no need to create a request that
 * might never have any work submitted. Note further that if a request is
 * returned in *to_req, it is the responsibility of the caller to submit
 * that request (after potentially adding more work to it).
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
		     struct intel_engine_cs *to,
		     struct drm_i915_gem_request **to_req)
{
	const bool readonly = obj->base.pending_write_domain == 0;
	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
	int ret, i, n;

	if (!obj->active)
		return 0;

	if (to == NULL)
		return i915_gem_object_wait_rendering(obj, readonly);

	n = 0;
	if (readonly) {
		if (obj->last_write_req)
			req[n++] = obj->last_write_req;
	} else {
		for (i = 0; i < I915_NUM_ENGINES; i++)
			if (obj->last_read_req[i])
				req[n++] = obj->last_read_req[i];
	}
	for (i = 0; i < n; i++) {
		ret = __i915_gem_object_sync(obj, to, req[i], to_req);
		if (ret)
			return ret;
	}

	return 0;
}
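
/*
 * One possible calling pattern (a sketch based on the rules documented above;
 * error handling omitted):
 *
 *	struct drm_i915_gem_request *req = NULL;
 *
 *	ret = i915_gem_object_sync(obj, engine, &req);
 *	...
 *	if (req)
 *		i915_add_request_no_flush(req);
 *
 * i.e. if a request was allocated on the caller's behalf for the semaphore
 * emission, the caller must submit it.
 */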

static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
{
	u32 old_write_domain, old_read_domains;

	/* Force a pagefault for domain tracking on next user access */
	i915_gem_release_mmap(obj);

	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		return;

	old_read_domains = obj->base.read_domains;
	old_write_domain = obj->base.write_domain;

	obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
	obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->pin_count);

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int ret;

	if (list_empty(&vma->obj_link))
		return 0;

	if (!drm_mm_node_allocated(&vma->node)) {
		i915_gem_vma_destroy(vma);
		return 0;
	}

	if (vma->pin_count)
		return -EBUSY;

	BUG_ON(obj->pages == NULL);

	if (wait) {
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;
	}

	if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
		i915_gem_object_finish_gtt(obj);

		/* release the fence reg _after_ flushing */
		ret = i915_gem_object_put_fence(obj);
		if (ret)
			return ret;

		__i915_vma_iounmap(vma);
	}

	trace_i915_vma_unbind(vma);

	vma->vm->unbind_vma(vma);
	vma->bound = 0;

	list_del_init(&vma->vm_link);
	if (vma->is_ggtt) {
		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
			obj->map_and_fenceable = false;
		} else if (vma->ggtt_view.pages) {
			sg_free_table(vma->ggtt_view.pages);
			kfree(vma->ggtt_view.pages);
		}
		vma->ggtt_view.pages = NULL;
	}

	drm_mm_remove_node(&vma->node);
	i915_gem_vma_destroy(vma);

	/* Since the unbound list is global, only move to that list if
	 * no more VMAs exist. */
	if (list_empty(&obj->vma_list))
		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);

	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
	 * reaped by the shrinker.
	 */
	i915_gem_object_unpin_pages(obj);

	return 0;
}

int i915_vma_unbind(struct i915_vma *vma)
{
	return __i915_vma_unbind(vma, true);
}

int __i915_vma_unbind_no_wait(struct i915_vma *vma)
{
	return __i915_vma_unbind(vma, false);
}

int i915_gpu_idle(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	int ret;

	/* Flush everything onto the inactive list. */
	for_each_engine(engine, dev_priv) {
		if (!i915.enable_execlists) {
			struct drm_i915_gem_request *req;

			req = i915_gem_request_alloc(engine, NULL);
			if (IS_ERR(req))
				return PTR_ERR(req);

			ret = i915_switch_context(req);
			i915_add_request_no_flush(req);
			if (ret)
				return ret;
		}

		ret = intel_engine_idle(engine);
		if (ret)
			return ret;
	}

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
				     unsigned long cache_level)
{
	struct drm_mm_node *gtt_space = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	if (!drm_mm_node_allocated(gtt_space))
		return true;

	if (list_empty(&gtt_space->node_list))
		return true;

	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
	if (other->allocated && !other->hole_follows && other->color != cache_level)
		return false;

	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
		return false;

	return true;
}

/**
 * Finds free space in the GTT aperture and binds the object or a view of it
 * there.
 */
static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
			   struct i915_address_space *vm,
			   const struct i915_ggtt_view *ggtt_view,
			   unsigned alignment,
			   uint64_t flags)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 fence_alignment, unfenced_alignment;
	u32 search_flag, alloc_flag;
	u64 start, end;
	u64 size, fence_size;
	struct i915_vma *vma;
	int ret;

	if (i915_is_ggtt(vm)) {
		u32 view_size;

		if (WARN_ON(!ggtt_view))
			return ERR_PTR(-EINVAL);

		view_size = i915_ggtt_view_size(obj, ggtt_view);

		fence_size = i915_gem_get_gtt_size(dev,
						   view_size,
						   obj->tiling_mode);
		fence_alignment = i915_gem_get_gtt_alignment(dev,
							     view_size,
							     obj->tiling_mode,
							     true);
		unfenced_alignment = i915_gem_get_gtt_alignment(dev,
								view_size,
								obj->tiling_mode,
								false);
		size = flags & PIN_MAPPABLE ? fence_size : view_size;
	} else {
		fence_size = i915_gem_get_gtt_size(dev,
						   obj->base.size,
						   obj->tiling_mode);
		fence_alignment = i915_gem_get_gtt_alignment(dev,
							     obj->base.size,
							     obj->tiling_mode,
							     true);
		unfenced_alignment =
			i915_gem_get_gtt_alignment(dev,
						   obj->base.size,
						   obj->tiling_mode,
						   false);
		size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
	}

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	end = vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, ggtt->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);

	if (alignment == 0)
		alignment = flags & PIN_MAPPABLE ? fence_alignment :
						unfenced_alignment;
	if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
		DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
			  ggtt_view ? ggtt_view->type : 0,
			  alignment);
		return ERR_PTR(-EINVAL);
	}

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%llu > %s aperture=%llu\n",
			  ggtt_view ? ggtt_view->type : 0,
			  size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return ERR_PTR(-E2BIG);
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ERR_PTR(ret);

	i915_gem_object_pin_pages(obj);

	vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
			  i915_gem_obj_lookup_or_create_vma(obj, vm);

	if (IS_ERR(vma))
		goto err_unpin;

	if (flags & PIN_OFFSET_FIXED) {
		uint64_t offset = flags & PIN_OFFSET_MASK;

		if (offset & (alignment - 1) || offset + size > end) {
			ret = -EINVAL;
			goto err_free_vma;
		}
		vma->node.start = offset;
		vma->node.size = size;
		vma->node.color = obj->cache_level;
		ret = drm_mm_reserve_node(&vm->mm, &vma->node);
		if (ret) {
			ret = i915_gem_evict_for_vma(vma);
			if (ret == 0)
				ret = drm_mm_reserve_node(&vm->mm, &vma->node);
		}
		if (ret)
			goto err_free_vma;
	} else {
		if (flags & PIN_HIGH) {
			search_flag = DRM_MM_SEARCH_BELOW;
			alloc_flag = DRM_MM_CREATE_TOP;
		} else {
			search_flag = DRM_MM_SEARCH_DEFAULT;
			alloc_flag = DRM_MM_CREATE_DEFAULT;
		}

search_free:
		ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
							  size, alignment,
							  obj->cache_level,
							  start, end,
							  search_flag,
							  alloc_flag);
		if (ret) {
			ret = i915_gem_evict_something(dev, vm, size, alignment,
						       obj->cache_level,
						       start, end,
						       flags);
			if (ret == 0)
				goto search_free;

			goto err_free_vma;
		}
	}
	if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
		ret = -EINVAL;
		goto err_remove_node;
	}

	trace_i915_vma_bind(vma, flags);
	ret = i915_vma_bind(vma, obj->cache_level, flags);
	if (ret)
		goto err_remove_node;

	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
	list_add_tail(&vma->vm_link, &vm->inactive_list);

	return vma;

err_remove_node:
	drm_mm_remove_node(&vma->node);
err_free_vma:
	i915_gem_vma_destroy(vma);
	vma = ERR_PTR(ret);
err_unpin:
	i915_gem_object_unpin_pages(obj);
	return vma;
}

bool
i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			bool force)
{
	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj->pages == NULL)
		return false;

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 */
	if (obj->stolen || obj->phys_handle)
		return false;

	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines.  However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated.  As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
		obj->cache_dirty = true;
		return false;
	}

	trace_i915_gem_object_clflush(obj);
	drm_clflush_sg(obj->pages);
	obj->cache_dirty = false;

	return true;
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain.  Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush.  It also doesn't land in render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 */
	wmb();

	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	intel_fb_obj_flush(obj, false, ORIGIN_GTT);

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		return;

	if (i915_gem_clflush_object(obj, obj->pin_display))
		i915_gem_chipset_flush(obj->base.dev);

	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t old_write_domain, old_read_domains;
	struct i915_vma *vma;
	int ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_flush_cpu_write_domain(obj);

	/* Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * GTT domain upon first access.
	 */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		mb();

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
		obj->dirty = 1;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	/* And bump the LRU for this access */
	vma = i915_gem_obj_to_ggtt(obj);
	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
		list_move_tail(&vma->vm_link,
			       &ggtt->base.inactive_list);

	return 0;
}

/**
 * Changes the cache-level of an object across all VMA.
 *
 * After this function returns, the object will be in the new cache-level
 * across all GTT and the contents of the backing storage will be coherent,
 * with respect to the new cache-level. In order to keep the backing storage
 * coherent for all users, we only allow a single cache level to be set
 * globally on the object and prevent it from being changed whilst the
 * hardware is reading from the object. That is if the object is currently
 * on the scanout it will be set to uncached (or equivalent display
 * cache coherency) and all non-MOCS GPU access will also be uncached so
 * that all direct access to the scanout remains coherent.
 */
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct i915_vma *vma, *next;
	bool bound = false;
	int ret = 0;

	if (obj->cache_level == cache_level)
		goto out;

	/* Inspect the list of currently bound VMA and unbind any that would
	 * be invalid given the new cache-level. This is principally to
	 * catch the issue of the CS prefetch crossing page boundaries and
	 * reading an invalid PTE on older architectures.
	 */
	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (vma->pin_count) {
			DRM_DEBUG("can not change the cache level of pinned objects\n");
			return -EBUSY;
		}

		if (!i915_gem_valid_gtt_space(vma, cache_level)) {
			ret = i915_vma_unbind(vma);
			if (ret)
				return ret;
		} else
			bound = true;
	}

	/* We can reuse the existing drm_mm nodes but need to change the
	 * cache-level on the PTE. We could simply unbind them all and
	 * rebind with the correct cache-level on next use. However since
	 * we already have a valid slot, dma mapping, pages etc, we may as
	 * well rewrite the PTE in the belief that doing so tramples upon
	 * less state and so involves less work.
	 */
	if (bound) {
		/* Before we change the PTE, the GPU must not be accessing it.
		 * If we wait upon the object, we know that all the bound
		 * VMA are no longer active.
		 */
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;

		if (!HAS_LLC(dev) && cache_level != I915_CACHE_NONE) {
			/* Access to snoopable pages through the GTT is
			 * incoherent and on some machines causes a hard
			 * lockup. Relinquish the CPU mmapping to force
			 * userspace to refault in the pages and we can
			 * then double check if the GTT mapping is still
			 * valid for that pointer access.
			 */
			i915_gem_release_mmap(obj);

			/* As we no longer need a fence for GTT access,
			 * we can relinquish it now (and so prevent having
			 * to steal a fence from someone else on the next
			 * fence request). Note GPU activity would have
			 * dropped the fence as all snoopable access is
			 * supposed to be linear.
			 */
			ret = i915_gem_object_put_fence(obj);
			if (ret)
				return ret;
		} else {
			/* We either have incoherent backing store and
			 * so no GTT access or the architecture is fully
			 * coherent. In such cases, existing GTT mmaps
			 * ignore the cache bit in the PTE and we can
			 * rewrite it without confusing the GPU or having
			 * to force userspace to fault back in its mmaps.
			 */
		}

		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
			if (ret)
				return ret;
		}
	}

	list_for_each_entry(vma, &obj->vma_list, obj_link)
		vma->node.color = cache_level;
	obj->cache_level = cache_level;

out:
	/* Flush the dirty CPU caches to the backing storage so that the
	 * object is now coherent at its new cache level (with respect
	 * to the access domain).
	 */
	if (obj->cache_dirty &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		if (i915_gem_clflush_object(obj, true))
			i915_gem_chipset_flush(obj->base.dev);
	}

	return 0;
}
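
/*
 * Illustrative sketch, not part of the driver: a caller forcing an object to
 * be uncached, which is what the display-pinning path below effectively does
 * on hardware without write-through caching. The helper name and the
 * __maybe_unused marker exist only for this example.
 */
static int __maybe_unused example_make_uncached(struct drm_i915_gem_object *obj)
{
	/* Drop to I915_CACHE_NONE; bound PTEs are rewritten as needed. */
	return i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
}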

int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL)
		return -ENOENT;

	switch (obj->cache_level) {
	case I915_CACHE_LLC:
	case I915_CACHE_L3_LLC:
		args->caching = I915_CACHING_CACHED;
		break;

	case I915_CACHE_WT:
		args->caching = I915_CACHING_DISPLAY;
		break;

	default:
		args->caching = I915_CACHING_NONE;
		break;
	}

	drm_gem_object_unreference_unlocked(&obj->base);
	return 0;
}

int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret;

	switch (args->caching) {
	case I915_CACHING_NONE:
		level = I915_CACHE_NONE;
		break;
	case I915_CACHING_CACHED:
		/*
		 * Due to a HW issue on BXT A stepping, GPU stores via a
		 * snooped mapping may leave stale data in a corresponding CPU
		 * cacheline, whereas normally such cachelines would get
		 * invalidated.
		 */
		if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
			return -ENODEV;

		level = I915_CACHE_LLC;
		break;
	case I915_CACHING_DISPLAY:
		level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
		break;
	default:
		return -EINVAL;
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto rpm_put;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = i915_gem_object_set_cache_level(obj, level);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
rpm_put:
	intel_runtime_pm_put(dev_priv);

	return ret;
}

/*
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
 */
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view)
{
	u32 old_read_domains, old_write_domain;
	int ret;

	/* Mark the pin_display early so that we account for the
	 * display coherency whilst setting up the cache domains.
	 */
	obj->pin_display++;

	/* The display engine is not coherent with the LLC cache on gen6.  As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is lowest common denominator for all
	 * chipsets.
	 *
	 * However for gen6+, we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
	ret = i915_gem_object_set_cache_level(obj,
					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
	if (ret)
		goto err_unpin_display;

	/* As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
	 * always use map_and_fenceable for all scanout buffers.
	 */
	ret = i915_gem_object_ggtt_pin(obj, view, alignment,
				       view->type == I915_GGTT_VIEW_NORMAL ?
				       PIN_MAPPABLE : 0);
	if (ret)
		goto err_unpin_display;

	i915_gem_object_flush_cpu_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	obj->base.write_domain = 0;
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;

err_unpin_display:
	obj->pin_display--;
	return ret;
}

void
i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
					 const struct i915_ggtt_view *view)
{
	if (WARN_ON(obj->pin_display == 0))
		return;

	i915_gem_object_ggtt_unpin_view(obj, view);

	obj->pin_display--;
}
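
/*
 * Illustrative sketch, not part of the driver: how a display-facing caller
 * might pair the pin/unpin helpers above around scanout. The alignment value
 * and the helper name are made up for the example.
 */
static int __maybe_unused example_pin_scanout(struct drm_i915_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_pin_to_display_plane(obj, 4096,
						   &i915_ggtt_view_normal);
	if (ret)
		return ret;

	/* ... the object is now uncached/WT, mappable and safe to scan out ... */

	i915_gem_object_unpin_from_display_plane(obj, &i915_ggtt_view_normal);
	return 0;
}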

/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj, false);

		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
	struct drm_i915_gem_request *request, *target = NULL;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	/* ABI: return -EIO if already wedged */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return -EIO;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		/*
		 * Note that the request might not have been submitted yet.
		 * In which case emitted_jiffies will be zero.
		 */
		if (!request->emitted_jiffies)
			continue;

		target = request;
	}
	if (target)
		i915_gem_request_reference(target);
	spin_unlock(&file_priv->mm.lock);

	if (target == NULL)
		return 0;

	ret = __i915_wait_request(target, true, NULL, NULL);
	if (ret == 0)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);

	i915_gem_request_unreference__unlocked(target);

	return ret;
}

static bool
i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
{
	struct drm_i915_gem_object *obj = vma->obj;

	if (alignment &&
	    vma->node.start & (alignment - 1))
		return true;

	if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}
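
/*
 * Illustrative sketch, not part of the driver: how a caller might combine the
 * placement flags checked above when pinning into the GGTT. The 256KiB bias
 * and the helper name are hypothetical.
 */
static int __maybe_unused example_pin_above_bias(struct drm_i915_gem_object *obj)
{
	/* Require a mappable slot placed above a 256KiB soft bias. */
	return i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_normal, 0,
					PIN_MAPPABLE |
					PIN_OFFSET_BIAS | (256 << 10));
}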

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	bool mappable, fenceable;
	u32 fence_size, fence_alignment;

	fence_size = i915_gem_get_gtt_size(obj->base.dev,
					   obj->base.size,
					   obj->tiling_mode);
	fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
						     obj->base.size,
						     obj->tiling_mode,
						     true);

	fenceable = (vma->node.size == fence_size &&
		     (vma->node.start & (fence_alignment - 1)) == 0);

	mappable = (vma->node.start + fence_size <=
		    to_i915(obj->base.dev)->ggtt.mappable_end);

	obj->map_and_fenceable = mappable && fenceable;
}

static int
i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
		       struct i915_address_space *vm,
		       const struct i915_ggtt_view *ggtt_view,
		       uint32_t alignment,
		       uint64_t flags)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct i915_vma *vma;
	unsigned bound;
	int ret;

	if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
		return -ENODEV;

	if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
		return -EINVAL;

	if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
		return -EINVAL;

	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
		return -EINVAL;

	vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
			  i915_gem_obj_to_vma(obj, vm);

	if (vma) {
		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
			return -EBUSY;

		if (i915_vma_misplaced(vma, alignment, flags)) {
			WARN(vma->pin_count,
			     "bo is already pinned in %s with incorrect alignment:"
			     " offset=%08x %08x, req.alignment=%x, req.map_and_fenceable=%d,"
			     " obj->map_and_fenceable=%d\n",
			     ggtt_view ? "ggtt" : "ppgtt",
			     upper_32_bits(vma->node.start),
			     lower_32_bits(vma->node.start),
			     alignment,
			     !!(flags & PIN_MAPPABLE),
			     obj->map_and_fenceable);
			ret = i915_vma_unbind(vma);
			if (ret)
				return ret;

			vma = NULL;
		}
	}

	bound = vma ? vma->bound : 0;
	if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
		vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
						 flags);
		if (IS_ERR(vma))
			return PTR_ERR(vma);
	} else {
		ret = i915_vma_bind(vma, obj->cache_level, flags);
		if (ret)
			return ret;
	}

	if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
	    (bound ^ vma->bound) & GLOBAL_BIND) {
		__i915_vma_set_map_and_fenceable(vma);
		WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
	}

	vma->pin_count++;
	return 0;
}

int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
		    struct i915_address_space *vm,
		    uint32_t alignment,
		    uint64_t flags)
{
	return i915_gem_object_do_pin(obj, vm,
				      i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
				      alignment, flags);
}

int
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 uint32_t alignment,
			 uint64_t flags)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	BUG_ON(!view);

	return i915_gem_object_do_pin(obj, &ggtt->base, view,
				      alignment, flags | PIN_GLOBAL);
}

void
i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
				const struct i915_ggtt_view *view)
{
	struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);

	WARN_ON(vma->pin_count == 0);
	WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));

	--vma->pin_count;
}

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Count all active objects as busy, even if they are currently not used
	 * by the gpu. Users of this interface expect objects to eventually
	 * become non-busy without any further actions, therefore emit any
	 * necessary flushes here.
	 */
	ret = i915_gem_object_flush_active(obj);
	if (ret)
		goto unref;

	args->busy = 0;
	if (obj->active) {
		int i;

		for (i = 0; i < I915_NUM_ENGINES; i++) {
			struct drm_i915_gem_request *req;

			req = obj->last_read_req[i];
			if (req)
				args->busy |= 1 << (16 + req->engine->exec_id);
		}
		if (obj->last_write_req)
			args->busy |= obj->last_write_req->engine->exec_id;
	}

unref:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
	    break;
	default:
	    return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (i915_gem_obj_is_pinned(obj)) {
		ret = -EINVAL;
		goto out;
	}

	if (obj->pages &&
	    obj->tiling_mode != I915_TILING_NONE &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->madv == I915_MADV_WILLNEED)
			i915_gem_object_unpin_pages(obj);
		if (args->madv == I915_MADV_WILLNEED)
			i915_gem_object_pin_pages(obj);
	}

	if (obj->madv != __I915_MADV_PURGED)
		obj->madv = args->madv;

	/* if the object is no longer attached, discard its backing storage */
	if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj->madv != __I915_MADV_PURGED;

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
{
	int i;

	INIT_LIST_HEAD(&obj->global_list);
	for (i = 0; i < I915_NUM_ENGINES; i++)
		INIT_LIST_HEAD(&obj->engine_list[i]);
	INIT_LIST_HEAD(&obj->obj_exec_link);
	INIT_LIST_HEAD(&obj->vma_list);
	INIT_LIST_HEAD(&obj->batch_pool_link);

	obj->ops = ops;

	obj->fence_reg = I915_FENCE_REG_NONE;
	obj->madv = I915_MADV_WILLNEED;

	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
}

static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,
};

struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
						  size_t size)
{
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	gfp_t mask;
	int ret;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &obj->base, size);
	if (ret)
		goto fail;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = file_inode(obj->base.filp)->i_mapping;
	mapping_set_gfp_mask(mapping, mask);

	i915_gem_object_init(obj, &i915_gem_object_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(dev)) {
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached.  Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache.  This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		obj->cache_level = I915_CACHE_LLC;
	} else
		obj->cache_level = I915_CACHE_NONE;

	trace_i915_gem_object_create(obj);

	return obj;

fail:
	i915_gem_object_free(obj);

	return ERR_PTR(ret);
}

static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
	/* If we are the last user of the backing storage (be it shmemfs
	 * pages or stolen etc), we know that the pages are going to be
	 * immediately released. In this case, we can then skip copying
	 * back the contents from the GPU.
	 */

	if (obj->madv != I915_MADV_WILLNEED)
		return false;

	if (obj->base.filp == NULL)
		return true;

	/* At first glance, this looks racy, but then again so would be
	 * userspace racing mmap against close. However, the first external
	 * reference to the filp can only be obtained through the
	 * i915_gem_mmap_ioctl() which safeguards us against the user
	 * acquiring such a reference whilst we are in the middle of
	 * freeing the object.
	 */
	return atomic_long_read(&obj->base.filp->f_count) == 1;
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_vma *vma, *next;

	intel_runtime_pm_get(dev_priv);

	trace_i915_gem_object_destroy(obj);

	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
		int ret;

		vma->pin_count = 0;
		ret = i915_vma_unbind(vma);
		if (WARN_ON(ret == -ERESTARTSYS)) {
			bool was_interruptible;

			was_interruptible = dev_priv->mm.interruptible;
			dev_priv->mm.interruptible = false;

			WARN_ON(i915_vma_unbind(vma));

			dev_priv->mm.interruptible = was_interruptible;
		}
	}

	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
	 * before progressing. */
	if (obj->stolen)
		i915_gem_object_unpin_pages(obj);

	WARN_ON(obj->frontbuffer_bits);

	if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
	    obj->tiling_mode != I915_TILING_NONE)
		i915_gem_object_unpin_pages(obj);

	if (WARN_ON(obj->pages_pin_count))
		obj->pages_pin_count = 0;
	if (discard_backing_storage(obj))
		obj->madv = I915_MADV_DONTNEED;
	i915_gem_object_put_pages(obj);
	i915_gem_object_free_mmap_offset(obj);

	BUG_ON(obj->pages);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	if (obj->ops->release)
		obj->ops->release(obj);

	drm_gem_object_release(&obj->base);
	i915_gem_info_remove_obj(dev_priv, obj->base.size);

	kfree(obj->bit_17);
	i915_gem_object_free(obj);

	intel_runtime_pm_put(dev_priv);
}

struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm)
{
	struct i915_vma *vma;
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
		    vma->vm == vm)
			return vma;
	}
	return NULL;
}

struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
					   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(!view);

	list_for_each_entry(vma, &obj->vma_list, obj_link)
		if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
			return vma;
	return NULL;
}

void i915_gem_vma_destroy(struct i915_vma *vma)
{
	WARN_ON(vma->node.allocated);

	/* Keep the vma as a placeholder in the execbuffer reservation lists */
	if (!list_empty(&vma->exec_list))
		return;

	if (!vma->is_ggtt)
		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

	list_del(&vma->obj_link);

	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}

static void
i915_gem_stop_engines(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		dev_priv->gt.stop_engine(engine);
}

int
i915_gem_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gpu_idle(dev);
	if (ret)
		goto err;

	i915_gem_retire_requests(dev);

	i915_gem_stop_engines(dev);
	i915_gem_context_lost(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
	flush_delayed_work(&dev_priv->mm.idle_work);

	/* Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
	WARN_ON(dev_priv->mm.busy);

	return 0;

err:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

void i915_gem_init_swizzling(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN5(dev))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN6(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN7(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else if (IS_GEN8(dev))
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
	else
		BUG();
}

static void init_unused_ring(struct drm_device *dev, u32 base)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RING_CTL(base), 0);
	I915_WRITE(RING_HEAD(base), 0);
	I915_WRITE(RING_TAIL(base), 0);
	I915_WRITE(RING_START(base), 0);
}

static void init_unused_rings(struct drm_device *dev)
{
	if (IS_I830(dev)) {
		init_unused_ring(dev, PRB1_BASE);
		init_unused_ring(dev, SRB0_BASE);
		init_unused_ring(dev, SRB1_BASE);
		init_unused_ring(dev, SRB2_BASE);
		init_unused_ring(dev, SRB3_BASE);
	} else if (IS_GEN2(dev)) {
		init_unused_ring(dev, SRB0_BASE);
		init_unused_ring(dev, SRB1_BASE);
	} else if (IS_GEN3(dev)) {
		init_unused_ring(dev, PRB1_BASE);
		init_unused_ring(dev, PRB2_BASE);
	}
}

int i915_gem_init_engines(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_init_render_ring_buffer(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = intel_init_bsd_ring_buffer(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (HAS_BLT(dev)) {
		ret = intel_init_blt_ring_buffer(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	if (HAS_VEBOX(dev)) {
		ret = intel_init_vebox_ring_buffer(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	if (HAS_BSD2(dev)) {
		ret = intel_init_bsd2_ring_buffer(dev);
		if (ret)
			goto cleanup_vebox_ring;
	}

	return 0;

cleanup_vebox_ring:
	intel_cleanup_engine(&dev_priv->engine[VECS]);
cleanup_blt_ring:
	intel_cleanup_engine(&dev_priv->engine[BCS]);
cleanup_bsd_ring:
	intel_cleanup_engine(&dev_priv->engine[VCS]);
cleanup_render_ring:
	intel_cleanup_engine(&dev_priv->engine[RCS]);

	return ret;
}

int
i915_gem_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	int ret;

	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
		return -EIO;

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (IS_HASWELL(dev))
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	if (HAS_PCH_NOP(dev)) {
		if (IS_IVYBRIDGE(dev)) {
			u32 temp = I915_READ(GEN7_MSG_CTL);
			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
			I915_WRITE(GEN7_MSG_CTL, temp);
		} else if (INTEL_INFO(dev)->gen >= 7) {
			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
		}
	}

	i915_gem_init_swizzling(dev);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(dev);

	BUG_ON(!dev_priv->kernel_context);

	ret = i915_ppgtt_init_hw(dev);
	if (ret) {
		DRM_ERROR("PPGTT enable HW failed %d\n", ret);
		goto out;
	}

	/* Need to do basic initialisation of all rings first: */
	for_each_engine(engine, dev_priv) {
		ret = engine->init_hw(engine);
		if (ret)
			goto out;
	}

	intel_mocs_init_l3cc_table(dev);

	/* We can't enable contexts until all firmware is loaded */
	if (HAS_GUC_UCODE(dev)) {
		ret = intel_guc_ucode_load(dev);
		if (ret) {
			DRM_ERROR("Failed to initialize GuC, error %d\n", ret);
			ret = -EIO;
			goto out;
		}
	}

	/*
	 * Increment the next seqno by 0x100 so we have a visible break
	 * on re-initialisation
	 */
	ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	return ret;
}

int i915_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	i915.enable_execlists = intel_sanitize_enable_execlists(dev,
			i915.enable_execlists);

	mutex_lock(&dev->struct_mutex);

	if (!i915.enable_execlists) {
		dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
		dev_priv->gt.init_engines = i915_gem_init_engines;
		dev_priv->gt.cleanup_engine = intel_cleanup_engine;
		dev_priv->gt.stop_engine = intel_stop_engine;
	} else {
		dev_priv->gt.execbuf_submit = intel_execlists_submission;
		dev_priv->gt.init_engines = intel_logical_rings_init;
		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
		dev_priv->gt.stop_engine = intel_logical_ring_stop;
	}

	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	ret = i915_gem_init_userptr(dev);
	if (ret)
		goto out_unlock;

	i915_gem_init_ggtt(dev);

	ret = i915_gem_context_init(dev);
	if (ret)
		goto out_unlock;

	ret = dev_priv->gt.init_engines(dev);
	if (ret)
		goto out_unlock;

	ret = i915_gem_init_hw(dev);
	if (ret == -EIO) {
		/* Allow ring initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this where the GPU is angry,
		 * for all other failure, such as an allocation failure, bail.
		 */
		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
		ret = 0;
	}

out_unlock:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

void
i915_gem_cleanup_engines(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		dev_priv->gt.cleanup_engine(engine);

	if (i915.enable_execlists)
		/*
		 * Neither the BIOS, ourselves nor any other kernel
		 * expects the system to be in execlists mode on startup,
		 * so we need to reset the GPU back to legacy mode.
		 */
		intel_gpu_reset(dev, ALL_ENGINES);
}

static void
init_engine_lists(struct intel_engine_cs *engine)
{
	INIT_LIST_HEAD(&engine->active_list);
	INIT_LIST_HEAD(&engine->request_list);
}

void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
		 IS_I945GM(dev_priv) || IS_G33(dev_priv))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	if (intel_vgpu_active(dev))
		dev_priv->num_fence_regs =
				I915_READ(vgtif_reg(avail_rs.fence_num));

	/* Initialize fence registers to zero */
	i915_gem_restore_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
}

void
i915_gem_load_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	dev_priv->objects =
		kmem_cache_create("i915_gem_object",
				  sizeof(struct drm_i915_gem_object), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	dev_priv->vmas =
		kmem_cache_create("i915_gem_vma",
				  sizeof(struct i915_vma), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	dev_priv->requests =
		kmem_cache_create("i915_gem_request",
				  sizeof(struct drm_i915_gem_request), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	INIT_LIST_HEAD(&dev_priv->vm_list);
	INIT_LIST_HEAD(&dev_priv->context_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	for (i = 0; i < I915_NUM_ENGINES; i++)
		init_engine_lists(&dev_priv->engine[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
			  i915_gem_idle_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	/*
	 * Set initial sequence number for requests.
	 * Using this number allows the wraparound to happen early,
	 * catching any obvious problems.
	 */
	dev_priv->next_seqno = ((u32)~0 - 0x1100);
	dev_priv->last_seqno = ((u32)~0 - 0x1101);

	INIT_LIST_HEAD(&dev_priv->mm.fence_list);

	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	mutex_init(&dev_priv->fb_tracking.lock);
}

void i915_gem_load_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	kmem_cache_destroy(dev_priv->requests);
	kmem_cache_destroy(dev_priv->vmas);
	kmem_cache_destroy(dev_priv->objects);
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);

	if (!list_empty(&file_priv->rps.link)) {
		spin_lock(&to_i915(dev)->rps.client_lock);
		list_del(&file_priv->rps.link);
		spin_unlock(&to_i915(dev)->rps.client_lock);
	}
}

int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = dev->dev_private;
	file_priv->file = file;
	INIT_LIST_HEAD(&file_priv->rps.link);

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	file_priv->bsd_ring = -1;

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	if (old) {
		WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
		WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
		old->frontbuffer_bits &= ~frontbuffer_bits;
	}

	if (new) {
		WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
		WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
		new->frontbuffer_bits |= frontbuffer_bits;
	}
}
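
/*
 * Illustrative sketch, not part of the driver: handing a frontbuffer slot
 * from one object to another around a flip. The bit value and the helper
 * name are hypothetical.
 */
static void __maybe_unused example_track_flip(struct drm_i915_gem_object *old_fb,
					      struct drm_i915_gem_object *new_fb)
{
	/* Slot 0 stands in for the plane's frontbuffer bit. */
	i915_gem_track_fb(old_fb, new_fb, 1 << 0);
}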

/* All the new VM stuff */
u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);

	list_for_each_entry(vma, &o->vma_list, obj_link) {
		if (vma->is_ggtt &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm)
			return vma->node.start;
	}

	WARN(1, "%s vma for this object not found.\n",
	     i915_is_ggtt(vm) ? "global" : "ppgtt");
	return -1;
}

u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
				  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
			return vma->node.start;

	WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
	return -1;
}

bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link) {
		if (vma->is_ggtt &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
			return true;
	}

	return false;
}

bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
				  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (vma->is_ggtt &&
		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
		    drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, obj_link)
		if (drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	GEM_BUG_ON(list_empty(&o->vma_list));

	list_for_each_entry(vma, &o->vma_list, obj_link) {
		if (vma->is_ggtt &&
		    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
			return vma->node.size;
	}

	return 0;
}

bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	list_for_each_entry(vma, &obj->vma_list, obj_link)
		if (vma->pin_count > 0)
			return true;

	return false;
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
{
	struct page *page;

	/* Only default objects have per-page dirty tracking */
	if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
		return NULL;

	page = i915_gem_object_get_page(obj, n);
	set_page_dirty(page);
	return page;
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
			         const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct sg_table *sg;
	size_t bytes;
	int ret;

	obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		goto fail;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto fail;

	i915_gem_object_pin_pages(obj);
	sg = obj->pages;
	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
	obj->dirty = 1;		/* Backing store is now out of date */
	i915_gem_object_unpin_pages(obj);

	if (WARN_ON(bytes != size)) {
		DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
		ret = -EFAULT;
		goto fail;
	}

	return obj;

fail:
	drm_gem_object_unreference(&obj->base);
	return ERR_PTR(ret);
}
}