/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
						   bool force);
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly);
static void
i915_gem_object_retire(struct drm_i915_gem_object *obj);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
					     struct shrink_control *sc);
static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
					    struct shrink_control *sc);
static int i915_gem_shrinker_oom(struct notifier_block *nb,
				 unsigned long event,
				 void *ptr);
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);

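/* CPU access to an object is coherent with the GPU when the platform has a
 * shared LLC or the object uses a snooped (non-NONE) cache level; otherwise
 * the CPU caches must be clflushed around GPU access.
 */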
static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

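/* Wait (for up to 10 seconds) for a pending GPU reset to complete before
 * callers take struct_mutex; bail out with -EIO if the reset never finishes.
 */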
static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
		   i915_terminally_wedged(error))
	if (EXIT_COND)
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       EXIT_COND,
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}
#undef EXIT_COND

	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

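/* An object is "inactive" once it is bound into some GTT but no longer
 * referenced by commands outstanding on the GPU.
 */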
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound_any(obj) && !obj->active;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
				  args->gtt_end);
	dev_priv->gtt.mappable_end = args->gtt_end;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (i915_gem_obj_is_pinned(obj))
			pinned += i915_gem_obj_ggtt_size(obj);
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->gtt.base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

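/* A "phys" object carries a physically contiguous DMA copy of the buffer
 * contents. Detaching writes that copy back into the shmem pages (if the
 * object is still WILLNEED) and releases the DMA allocation.
 */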
static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
{
	drm_dma_handle_t *phys = obj->phys_handle;

	if (!phys)
		return;

	if (obj->madv == I915_MADV_WILLNEED) {
		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
		char *vaddr = phys->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page = shmem_read_mapping_page(mapping, i);
			if (!IS_ERR(page)) {
				char *dst = kmap_atomic(page);
				memcpy(dst, vaddr, PAGE_SIZE);
				drm_clflush_virt_range(dst, PAGE_SIZE);
				kunmap_atomic(dst);

				set_page_dirty(page);
				mark_page_accessed(page);
				page_cache_release(page);
			}
			vaddr += PAGE_SIZE;
		}
		i915_gem_chipset_flush(obj->base.dev);
	}

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
#endif
	drm_pci_free(obj->base.dev, phys);
	obj->phys_handle = NULL;
}

int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
			    int align)
{
	drm_dma_handle_t *phys;
	struct address_space *mapping;
	char *vaddr;
	int i;

	if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align -1))
			return -EBUSY;

		return 0;
	}

	if (obj->madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (obj->base.filp == NULL)
		return -EINVAL;

	/* create a new object */
	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
	if (!phys)
		return -ENOMEM;

	vaddr = phys->vaddr;
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
#endif
	mapping = file_inode(obj->base.filp)->i_mapping;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page)) {
#ifdef CONFIG_X86
			set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
#endif
			drm_pci_free(obj->base.dev, phys);
			return PTR_ERR(page);
		}

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		kunmap_atomic(src);

		mark_page_accessed(page);
		page_cache_release(page);

		vaddr += PAGE_SIZE;
	}

	obj->phys_handle = phys;
	return 0;
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = to_user_ptr(args->data_ptr);

	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten)
			return -EFAULT;
	}

	i915_gem_chipset_flush(dev);
	return 0;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->slab, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

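/* Swizzled copy helpers: for pages affected by bit-17 swizzling the two
 * 64-byte halves of each 128-byte span are swapped, so these copy one
 * cacheline at a time with the GPU offset XORed with 64.
 */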
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;

	if (!obj->base.filp)
		return -EINVAL;

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;

		i915_gem_object_retire(obj);
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	return ret;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

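/* Read through the shmem backing store one page at a time, preferring the
 * atomic kmap fast path and falling back to the non-atomic slow path
 * (dropping struct_mutex and prefaulting the user buffer) when it fails.
 */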
static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source.  */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->gtt.mappable, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_ggtt_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
					user_data, page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

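/* Write through the shmem backing store one page at a time, clflushing
 * around the copy as required for non-coherent objects and falling back to
 * a non-atomic slow path (dropping struct_mutex) when the fast copy faults.
 */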
static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;

		i915_gem_object_retire(obj);
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire page. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				i915_gem_chipset_flush(dev);
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(dev);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_handle) {
		ret = i915_gem_phys_pwrite(obj, args, file);
		goto out;
	}

	if (obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC)
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_check_wedge(struct i915_gpu_error *error,
		     bool interruptible)
{
	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but the reset failed ... */
		if (i915_terminally_wedged(error))
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
int
i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
{
	int ret;

	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
	if (seqno == ring->outstanding_lazy_seqno)
		ret = i915_add_request(ring, NULL);

	return ret;
}

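/* Helpers for __wait_seqno(): fake_irq() lets a timer wake the waiter when
 * an interrupt is suspected missing, missed_irq() tests the ring's
 * missed-irq flag, and can_wait_boost() uses an atomic flag so that each
 * file triggers at most one pending RPS boost.
 */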
static void fake_irq(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}

static bool missed_irq(struct drm_i915_private *dev_priv,
		       struct intel_engine_cs *ring)
{
	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}

static bool can_wait_boost(struct drm_i915_file_private *file_priv)
{
	if (file_priv == NULL)
		return true;

	return !atomic_xchg(&file_priv->rps_wait_boost, true);
}

/**
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: seqno to wait for
 * @reset_counter: reset sequence associated with the given seqno
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
			unsigned reset_counter,
			bool interruptible,
			struct timespec *timeout,
			struct drm_i915_file_private *file_priv)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const bool irq_test_in_progress =
		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
	struct timespec before, now;
	DEFINE_WAIT(wait);
	unsigned long timeout_expire;
	int ret;

	WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");

	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

	timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;

	if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
		gen6_rps_boost(dev_priv);
		if (file_priv)
			mod_delayed_work(dev_priv->wq,
					 &file_priv->mm.idle_work,
					 msecs_to_jiffies(100));
	}

	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged */
	trace_i915_gem_request_wait_begin(ring, seqno);
	getrawmonotonic(&before);
	for (;;) {
		struct timer_list timer;

		prepare_to_wait(&ring->irq_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
			 * is truly gone. */
			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
			if (ret == 0)
				ret = -EAGAIN;
			break;
		}

		if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
			ret = 0;
			break;
		}

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (timeout && time_after_eq(jiffies, timeout_expire)) {
			ret = -ETIME;
			break;
		}

		timer.function = NULL;
		if (timeout || missed_irq(dev_priv, ring)) {
			unsigned long expire;

			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
			mod_timer(&timer, expire);
		}

		io_schedule();

		if (timer.function) {
			del_singleshot_timer_sync(&timer);
			destroy_timer_on_stack(&timer);
		}
	}
	getrawmonotonic(&now);
	trace_i915_gem_request_wait_end(ring, seqno);

	if (!irq_test_in_progress)
		ring->irq_put(ring);

	finish_wait(&ring->irq_queue, &wait);

	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
		*timeout = timespec_sub(*timeout, sleep_time);
		if (!timespec_valid(timeout)) /* i.e. negative time remains */
			set_normalized_timespec(timeout, 0, 0);
	}

	return ret;
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible = dev_priv->mm.interruptible;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(seqno == 0);

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	return __wait_seqno(ring, seqno,
			    atomic_read(&dev_priv->gpu_error.reset_counter),
			    interruptible, NULL, NULL);
}

static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
				     struct intel_engine_cs *ring)
{
	if (!obj->active)
		return 0;

	/* Manually manage the write flush as we may have not yet
	 * retired the buffer.
	 *
	 * Note that the last_write_seqno is always the earlier of
	 * the two (read/write) seqno, so if we have successfully waited,
	 * we know we have passed the last write.
	 */
	obj->last_write_seqno = 0;

	return 0;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct intel_engine_cs *ring = obj->ring;
	u32 seqno;
	int ret;

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
					    struct drm_i915_file_private *file_priv,
					    bool readonly)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = obj->ring;
	unsigned reset_counter;
	u32 seqno;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!dev_priv->mm.interruptible);

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
	mutex_lock(&dev->struct_mutex);
	if (ret)
		return ret;

	return i915_gem_object_wait_rendering__tail(obj, ring);
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	intel_edp_psr_exit(dev, true);

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj,
							  file->driver_priv,
							  !write_domain);
	if (ret)
		goto unref;

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

unref:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	intel_edp_psr_exit(dev, true);

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_display)
		i915_gem_object_flush_cpu_write_domain(obj, true);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->filp) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	intel_runtime_pm_get(dev_priv);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Try to flush the object off the GPU first without holding the lock.
	 * Upon reacquiring the lock, we will perform our sanity checks and then
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
	if (ret)
		goto unlock;

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
		ret = -EFAULT;
		goto unlock;
	}

	/* Now bind it into the GTT if needed */
	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
	if (ret)
		goto unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto unpin;

	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto unpin;

	/* Finally, remap it using the new GTT offset */
	pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
	pfn >>= PAGE_SHIFT;

	if (!obj->fault_mappable) {
		unsigned long size = min_t(unsigned long,
					   vma->vm_end - vma->vm_start,
					   obj->base.size);
		int i;

		for (i = 0; i < size >> PAGE_SHIFT; i++) {
			ret = vm_insert_pfn(vma,
					    (unsigned long)vma->vm_start + i * PAGE_SIZE,
					    pfn + i);
			if (ret)
				break;
		}

		obj->fault_mappable = true;
	} else
		ret = vm_insert_pfn(vma,
				    (unsigned long)vmf->virtual_address,
				    pfn + page_offset);
unpin:
	i915_gem_object_ggtt_unpin(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EIO:
		/* If this -EIO is due to a gpu hang, give the reset code a
		 * chance to clean up the mess. Otherwise return the proper
		 * SIGBUS. */
		if (i915_terminally_wedged(&dev_priv->gpu_error)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}
	case -EAGAIN:
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
		 */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		ret = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		ret = VM_FAULT_OOM;
		break;
	case -ENOSPC:
	case -EFAULT:
		ret = VM_FAULT_SIGBUS;
		break;
	default:
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
		ret = VM_FAULT_SIGBUS;
		break;
	}

	intel_runtime_pm_put(dev_priv);
	return ret;
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	if (!obj->fault_mappable)
		return;

	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);
	obj->fault_mappable = false;
}

void
i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		i915_gem_release_mmap(obj);
}

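/* Size of the GTT range needed to map an object: the object size on gen4+
 * or for untiled objects, otherwise rounded up to a power-of-two fence
 * region (1MiB minimum on gen3, 512KiB on older chips).
 */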
uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
	uint32_t gtt_size;

	if (INTEL_INFO(dev)->gen >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (INTEL_INFO(dev)->gen == 3)
		gtt_size = 1024*1024;
	else
		gtt_size = 512*1024;

	while (gtt_size < size)
		gtt_size <<= 1;

	return gtt_size;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
			   int tiling_mode, bool fenced)
{
	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
}

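/* Reserve a fake mmap offset for the object, purging and ultimately
 * shrinking other objects if the offset space is too fragmented.
 */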
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int ret;

	if (drm_vma_node_has_offset(&obj->base.vma_node))
		return 0;

	dev_priv->mm.shrinker_no_lock_stealing = true;

	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	/* Badly fragmented mmap space? The only way we can recover
	 * space is by destroying unwanted objects. We can't randomly release
	 * mmap_offsets as userspace expects them to be persistent for the
	 * lifetime of the objects. The closest we can is to release the
	 * offsets on purgeable objects by truncating it and marking it purged,
	 * which prevents userspace from ever using that object again.
	 */
	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
		goto out;

	i915_gem_shrink_all(dev_priv);
	ret = drm_gem_create_mmap_offset(&obj->base);
out:
	dev_priv->mm.shrinker_no_lock_stealing = false;

	return ret;
}

static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
}

int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->base.size > dev_priv->gtt.mappable_end) {
		ret = -E2BIG;
		goto out;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}

static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{
	return obj->madv == I915_MADV_DONTNEED;
}

D
Daniel Vetter 已提交
1832 1833 1834
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.filp == NULL)
		return;

	/* Our goal here is to return as much of the memory as
	 * possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->madv = __I915_MADV_PURGED;
}
/* Try to discard unwanted pages */
static void
i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;

	switch (obj->madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
		/* fall through */
	case __I915_MADV_PURGED:
		return;
	}

	if (obj->base.filp == NULL)
		return;

	mapping = file_inode(obj->base.filp)->i_mapping;
	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}

static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct sg_page_iter sg_iter;
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		WARN_ON(ret != -EIO);
		i915_gem_clflush_object(obj, true);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (obj->dirty)
			set_page_dirty(page);

		if (obj->madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		page_cache_release(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

int
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	const struct drm_i915_gem_object_ops *ops = obj->ops;

	if (obj->pages == NULL)
		return 0;

	if (obj->pages_pin_count)
		return -EBUSY;

	BUG_ON(i915_gem_obj_bound_any(obj));

	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early. */
	list_del(&obj->global_list);

	ops->put_pages(obj);
	obj->pages = NULL;

	i915_gem_object_invalidate(obj);

	return 0;
}

static unsigned long
__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
		  bool purgeable_only)
{
	struct list_head still_in_list;
	struct drm_i915_gem_object *obj;
	unsigned long count = 0;

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count of 0.
	 */
	INIT_LIST_HEAD(&still_in_list);
	while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
		obj = list_first_entry(&dev_priv->mm.unbound_list,
				       typeof(*obj), global_list);
		list_move_tail(&obj->global_list, &still_in_list);

		if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
			continue;

		drm_gem_object_reference(&obj->base);

		if (i915_gem_object_put_pages(obj) == 0)
			count += obj->base.size >> PAGE_SHIFT;

		drm_gem_object_unreference(&obj->base);
	}
	list_splice(&still_in_list, &dev_priv->mm.unbound_list);

	INIT_LIST_HEAD(&still_in_list);
	while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
		struct i915_vma *vma, *v;

		obj = list_first_entry(&dev_priv->mm.bound_list,
				       typeof(*obj), global_list);
		list_move_tail(&obj->global_list, &still_in_list);

		if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
			continue;

		drm_gem_object_reference(&obj->base);

		list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
			if (i915_vma_unbind(vma))
				break;

		if (i915_gem_object_put_pages(obj) == 0)
			count += obj->base.size >> PAGE_SHIFT;

		drm_gem_object_unreference(&obj->base);
	}
	list_splice(&still_in_list, &dev_priv->mm.bound_list);

	return count;
}

static unsigned long
i915_gem_purge(struct drm_i915_private *dev_priv, long target)
{
	return __i915_gem_shrink(dev_priv, target, true);
}

static unsigned long
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	i915_gem_evict_everything(dev_priv->dev);
	return __i915_gem_shrink(dev_priv, LONG_MAX, false);
}

static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int page_count, i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sg_page_iter sg_iter;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	gfp_t gfp;

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	page_count = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping = file_inode(obj->base.filp)->i_mapping;
	gfp = mapping_gfp_mask(mapping);
	gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
	gfp &= ~(__GFP_IO | __GFP_WAIT);
	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		if (IS_ERR(page)) {
			i915_gem_purge(dev_priv, page_count);
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		}
		if (IS_ERR(page)) {
			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 */
			i915_gem_shrink_all(dev_priv);
			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				goto err_pages;
		}
#ifdef CONFIG_SWIOTLB
		if (swiotlb_nr_tbl()) {
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
			sg = sg_next(sg);
			continue;
		}
#endif
		if (!i || page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
	}
#ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
#endif
		sg_mark_end(sg);
	obj->pages = st;

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj);

	return 0;

err_pages:
	sg_mark_end(sg);
	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
		page_cache_release(sg_page_iter_page(&sg_iter));
	sg_free_table(st);
	kfree(st);

	/* shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (PTR_ERR(page) == -ENOSPC)
		return -ENOMEM;
	else
		return PTR_ERR(page);
}
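
/*
 * Worked example (illustrative only): if shmemfs hands back pfns 100, 101,
 * 102 and 200 for a 16KiB object, the loop above coalesces the first three
 * pages into one 12KiB scatterlist entry and starts a fresh 4KiB entry at
 * pfn 200, so st->nents ends up as 2 rather than 4 (unless SWIOTLB is in
 * use, in which case every page keeps its own entry).
 */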

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_get_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_put_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int
i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	const struct drm_i915_gem_object_ops *ops = obj->ops;
	int ret;

	if (obj->pages)
		return 0;

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	BUG_ON(obj->pages_pin_count);

	ret = ops->get_pages(obj);
	if (ret)
		return ret;

	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
	return 0;
}

static void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
			       struct intel_engine_cs *ring)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 seqno = intel_ring_get_seqno(ring);

	BUG_ON(ring == NULL);
	if (obj->ring != ring && obj->last_write_seqno) {
		/* Keep the seqno relative to the current ring */
		obj->last_write_seqno = seqno;
	}
	obj->ring = ring;

	/* Add a reference if we're newly entering the active list. */
	if (!obj->active) {
		drm_gem_object_reference(&obj->base);
		obj->active = 1;
	}

	list_move_tail(&obj->ring_list, &ring->active_list);

	obj->last_read_seqno = seqno;

	if (obj->fenced_gpu_access) {
		obj->last_fenced_seqno = seqno;

		/* Bump MRU to take account of the delayed flush */
		if (obj->fence_reg != I915_FENCE_REG_NONE) {
			struct drm_i915_fence_reg *reg;

			reg = &dev_priv->fence_regs[obj->fence_reg];
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
		}
	}
}

void i915_vma_move_to_active(struct i915_vma *vma,
			     struct intel_engine_cs *ring)
{
	list_move_tail(&vma->mm_list, &vma->vm->active_list);
	return i915_gem_object_move_to_active(vma->obj, ring);
}

static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct i915_address_space *vm;
	struct i915_vma *vma;

	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
	BUG_ON(!obj->active);

	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
		vma = i915_gem_obj_to_vma(obj, vm);
		if (vma && !list_empty(&vma->mm_list))
			list_move_tail(&vma->mm_list, &vm->inactive_list);
	}

	list_del_init(&obj->ring_list);
	obj->ring = NULL;

	obj->last_read_seqno = 0;
	obj->last_write_seqno = 0;
	obj->base.write_domain = 0;

	obj->last_fenced_seqno = 0;
	obj->fenced_gpu_access = false;

	obj->active = 0;
	drm_gem_object_unreference(&obj->base);

	WARN_ON(i915_verify_lists(dev));
}

static void
i915_gem_object_retire(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *ring = obj->ring;

	if (ring == NULL)
		return;

	if (i915_seqno_passed(ring->get_seqno(ring, true),
			      obj->last_read_seqno))
		i915_gem_object_move_to_inactive(obj);
}

static int
i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i, j;

	/* Carefully retire all requests without writing to the rings */
	for_each_ring(ring, dev_priv, i) {
		ret = intel_ring_idle(ring);
		if (ret)
			return ret;
	}
	i915_gem_retire_requests(dev);

	/* Finally reset hw state */
	for_each_ring(ring, dev_priv, i) {
		intel_ring_init_seqno(ring, seqno);

		for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
			ring->semaphore.sync_seqno[j] = 0;
	}

	return 0;
}

int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (seqno == 0)
		return -EINVAL;

	/* HWS page needs to be set less than what we
	 * will inject to ring
	 */
	ret = i915_gem_init_seqno(dev, seqno - 1);
	if (ret)
		return ret;

	/* Carefully set the last_seqno value so that wrap
	 * detection still works
	 */
	dev_priv->next_seqno = seqno;
	dev_priv->last_seqno = seqno - 1;
	if (dev_priv->last_seqno == 0)
		dev_priv->last_seqno--;

	return 0;
}
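
/*
 * Illustrative example: i915_gem_set_seqno(dev, 0x100) idles the rings and
 * seeds the hardware with 0xff via i915_gem_init_seqno(), then sets
 * next_seqno = 0x100 and last_seqno = 0xff, so the next request emitted uses
 * 0x100 while last_seqno stays one behind it for wrap detection.
 */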

int
i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* reserve 0 for non-seqno */
	if (dev_priv->next_seqno == 0) {
		int ret = i915_gem_init_seqno(dev, 0);
		if (ret)
			return ret;

		dev_priv->next_seqno = 1;
	}

	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
	return 0;
}

2325
int __i915_add_request(struct intel_engine_cs *ring,
2326
		       struct drm_file *file,
2327
		       struct drm_i915_gem_object *obj,
2328
		       u32 *out_seqno)
2329
{
2330
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2331
	struct drm_i915_gem_request *request;
2332
	u32 request_ring_position, request_start;
2333 2334
	int ret;

2335
	request_start = intel_ring_get_tail(ring);
2336 2337 2338 2339 2340 2341 2342
	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
2343 2344 2345
	ret = intel_ring_flush_all_caches(ring);
	if (ret)
		return ret;
2346

2347 2348
	request = ring->preallocated_lazy_request;
	if (WARN_ON(request == NULL))
2349
		return -ENOMEM;
2350

2351 2352 2353 2354 2355 2356 2357
	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	request_ring_position = intel_ring_get_tail(ring);

2358
	ret = ring->add_request(ring);
2359
	if (ret)
2360
		return ret;
2361

2362
	request->seqno = intel_ring_get_seqno(ring);
2363
	request->ring = ring;
2364
	request->head = request_start;
2365
	request->tail = request_ring_position;
2366 2367 2368 2369 2370 2371 2372

	/* Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
2373
	request->batch_obj = obj;
2374

2375 2376 2377 2378
	/* Hold a reference to the current context so that we can inspect
	 * it later in case a hangcheck error event fires.
	 */
	request->ctx = ring->last_context;
2379 2380 2381
	if (request->ctx)
		i915_gem_context_reference(request->ctx);

2382
	request->emitted_jiffies = jiffies;
2383
	list_add_tail(&request->list, &ring->request_list);
2384
	request->file_priv = NULL;
2385

	if (file) {
		struct drm_i915_file_private *file_priv = file->driver_priv;

2389
		spin_lock(&file_priv->mm.lock);
2390
		request->file_priv = file_priv;
2391
		list_add_tail(&request->client_list,
2392
			      &file_priv->mm.request_list);
2393
		spin_unlock(&file_priv->mm.lock);
2394
	}
2395

2396
	trace_i915_gem_request_add(ring, request->seqno);
2397
	ring->outstanding_lazy_seqno = 0;
2398
	ring->preallocated_lazy_request = NULL;

2400
	if (!dev_priv->ums.mm_suspended) {
2401 2402
		i915_queue_hangcheck(ring->dev);

2403 2404 2405 2406 2407
		cancel_delayed_work_sync(&dev_priv->mm.idle_work);
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->mm.retire_work,
				   round_jiffies_up_relative(HZ));
		intel_mark_busy(dev_priv->dev);
	}
2409

2410
	if (out_seqno)
2411
		*out_seqno = request->seqno;
2412
	return 0;
2413 2414
}

2415 2416
static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2417
{
2418
	struct drm_i915_file_private *file_priv = request->file_priv;
2419

2420 2421
	if (!file_priv)
		return;

2423
	spin_lock(&file_priv->mm.lock);
2424 2425
	list_del(&request->client_list);
	request->file_priv = NULL;
2426
	spin_unlock(&file_priv->mm.lock);
2427 2428
}

2429
static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
2430
				   const struct intel_context *ctx)
2431
{
2432
	unsigned long elapsed;
2433

2434 2435 2436
	elapsed = get_seconds() - ctx->hang_stats.guilty_ts;

	if (ctx->hang_stats.banned)
2437 2438 2439
		return true;

	if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
2440
		if (!i915_gem_context_is_default(ctx)) {
2441
			DRM_DEBUG("context hanging too fast, banning!\n");
2442
			return true;
2443 2444 2445
		} else if (i915_stop_ring_allow_ban(dev_priv)) {
			if (i915_stop_ring_allow_warn(dev_priv))
				DRM_ERROR("gpu hanging too fast, banning!\n");
2446
			return true;
2447
		}
2448 2449 2450 2451 2452
	}

	return false;
}

2453
static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2454
				  struct intel_context *ctx,
2455
				  const bool guilty)
2456
{
2457 2458 2459 2460
	struct i915_ctx_hang_stats *hs;

	if (WARN_ON(!ctx))
		return;
2461

2462 2463 2464
	hs = &ctx->hang_stats;

	if (guilty) {
2465
		hs->banned = i915_context_is_banned(dev_priv, ctx);
2466 2467 2468 2469
		hs->batch_active++;
		hs->guilty_ts = get_seconds();
	} else {
		hs->batch_pending++;
2470 2471 2472
	}
}

static void i915_gem_free_request(struct drm_i915_gem_request *request)
{
	list_del(&request->list);
	i915_gem_request_remove_from_client(request);

	if (request->ctx)
		i915_gem_context_unreference(request->ctx);

	kfree(request);
}

2484
struct drm_i915_gem_request *
2485
i915_gem_find_active_request(struct intel_engine_cs *ring)
2486
{
2487
	struct drm_i915_gem_request *request;
2488 2489 2490
	u32 completed_seqno;

	completed_seqno = ring->get_seqno(ring, false);
2491 2492 2493 2494

	list_for_each_entry(request, &ring->request_list, list) {
		if (i915_seqno_passed(completed_seqno, request->seqno))
			continue;
2495

2496
		return request;
2497
	}
2498 2499 2500 2501 2502

	return NULL;
}

static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2503
				       struct intel_engine_cs *ring)
2504 2505 2506 2507
{
	struct drm_i915_gem_request *request;
	bool ring_hung;

2508
	request = i915_gem_find_active_request(ring);
2509 2510 2511 2512 2513 2514

	if (request == NULL)
		return;

	ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;

2515
	i915_set_reset_status(dev_priv, request->ctx, ring_hung);
2516 2517

	list_for_each_entry_continue(request, &ring->request_list, list)
2518
		i915_set_reset_status(dev_priv, request->ctx, false);
2519
}
2520

2521
static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2522
					struct intel_engine_cs *ring)
2523
{
2524
	while (!list_empty(&ring->active_list)) {
2525
		struct drm_i915_gem_object *obj;
2526

2527 2528 2529
		obj = list_first_entry(&ring->active_list,
				       struct drm_i915_gem_object,
				       ring_list);
2530

2531
		i915_gem_object_move_to_inactive(obj);
2532
	}

	/*
	 * We must free the requests after all the corresponding objects have
	 * been moved off active lists. Which is the same order as the normal
	 * retire_requests function does. This is important if object hold
	 * implicit references on things like e.g. ppgtt address spaces through
	 * the request.
	 */
	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		i915_gem_free_request(request);
	}
2550 2551 2552 2553 2554

	/* These may not have been flushed before the reset, do so now */
	kfree(ring->preallocated_lazy_request);
	ring->preallocated_lazy_request = NULL;
	ring->outstanding_lazy_seqno = 0;
2555 2556
}

2557
void i915_gem_restore_fences(struct drm_device *dev)
2558 2559 2560 2561
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

2562
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
2563
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2564

2565 2566 2567 2568 2569 2570 2571 2572 2573 2574
		/*
		 * Commit delayed tiling changes if we have an object still
		 * attached to the fence, otherwise just clear the fence.
		 */
		if (reg->obj) {
			i915_gem_object_update_fence(reg->obj, reg,
						     reg->obj->tiling_mode);
		} else {
			i915_gem_write_fence(dev, i, NULL);
		}
2575 2576 2577
	}
}

2578
void i915_gem_reset(struct drm_device *dev)
2579
{
2580
	struct drm_i915_private *dev_priv = dev->dev_private;
2581
	struct intel_engine_cs *ring;
2582
	int i;
2583

2584 2585 2586 2587 2588 2589 2590 2591
	/*
	 * Before we free the objects from the requests, we need to inspect
	 * them for finding the guilty party. As the requests only borrow
	 * their reference to the objects, the inspection must be done first.
	 */
	for_each_ring(ring, dev_priv, i)
		i915_gem_reset_ring_status(dev_priv, ring);

2592
	for_each_ring(ring, dev_priv, i)
2593
		i915_gem_reset_ring_cleanup(dev_priv, ring);
2594

2595 2596
	i915_gem_context_reset(dev);

2597
	i915_gem_restore_fences(dev);
2598 2599 2600 2601 2602
}

/**
 * This function clears the request list as sequence numbers are passed.
 */
2603
void
2604
i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
2605 2606 2607
{
	uint32_t seqno;

	if (list_empty(&ring->request_list))
2609 2610
		return;

	WARN_ON(i915_verify_lists(ring->dev));
2612

2613
	seqno = ring->get_seqno(ring, true);
2614

2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632
	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate,
	 * before we free the context associated with the requests.
	 */
	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&ring->active_list,
				      struct drm_i915_gem_object,
				      ring_list);

		if (!i915_seqno_passed(seqno, obj->last_read_seqno))
			break;

		i915_gem_object_move_to_inactive(obj);
	}


2633
	while (!list_empty(&ring->request_list)) {
2634 2635
		struct drm_i915_gem_request *request;

2636
		request = list_first_entry(&ring->request_list,
2637 2638 2639
					   struct drm_i915_gem_request,
					   list);

2640
		if (!i915_seqno_passed(seqno, request->seqno))
2641 2642
			break;

		trace_i915_gem_request_retire(ring, request->seqno);
2644 2645 2646 2647 2648
		/* We know the GPU must have read the request to have
		 * sent us the seqno + interrupt, so use the position
		 * of tail of the request to update the last known position
		 * of the GPU head.
		 */
2649
		ring->buffer->last_retired_head = request->tail;
2650

2651
		i915_gem_free_request(request);
2652
	}
2653

	if (unlikely(ring->trace_irq_seqno &&
		     i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2656
		ring->irq_put(ring);
		ring->trace_irq_seqno = 0;
2658
	}
2659

	WARN_ON(i915_verify_lists(ring->dev));
2661 2662
}

2663
bool
2664 2665
i915_gem_retire_requests(struct drm_device *dev)
{
2666
	struct drm_i915_private *dev_priv = dev->dev_private;
2667
	struct intel_engine_cs *ring;
2668
	bool idle = true;
2669
	int i;
2670

2671
	for_each_ring(ring, dev_priv, i) {
2672
		i915_gem_retire_requests_ring(ring);
2673 2674 2675 2676 2677 2678 2679 2680 2681
		idle &= list_empty(&ring->request_list);
	}

	if (idle)
		mod_delayed_work(dev_priv->wq,
				   &dev_priv->mm.idle_work,
				   msecs_to_jiffies(100));

	return idle;
2682 2683
}

2684
static void
2685 2686
i915_gem_retire_work_handler(struct work_struct *work)
{
2687 2688 2689
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), mm.retire_work.work);
	struct drm_device *dev = dev_priv->dev;
2690
	bool idle;
2691

2692
	/* Come back later if the device is busy... */
2693 2694 2695 2696
	idle = false;
	if (mutex_trylock(&dev->struct_mutex)) {
		idle = i915_gem_retire_requests(dev);
		mutex_unlock(&dev->struct_mutex);
2697
	}
2698
	if (!idle)
2699 2700
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
				   round_jiffies_up_relative(HZ));
2701
}
2702

2703 2704 2705 2706 2707 2708 2709
static void
i915_gem_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), mm.idle_work.work);

	intel_mark_idle(dev_priv->dev);
2710 2711
}

/**
 * Ensures that an object will eventually get non-busy by flushing any required
 * write domains, emitting any outstanding lazy request and retiring any
 * completed requests.
 */
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{
	int ret;

	if (obj->active) {
2723
		ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2724 2725 2726 2727 2728 2729 2730 2731 2732
		if (ret)
			return ret;

		i915_gem_retire_requests_ring(obj->ring);
	}

	return 0;
}

/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
 * @DRM_IOCTL_ARGS: standard ioctl arguments
 *
 * Returns 0 if successful, else an error is returned with the remaining time in
 * the timeout parameter.
 *  -ETIME: object is still busy after timeout
 *  -ERESTARTSYS: signal interrupted the wait
 *  -ENOENT: object doesn't exist
 * Also possible, but rare:
 *  -EAGAIN: GPU wedged
 *  -ENOMEM: damn
 *  -ENODEV: Internal IRQ fail
 *  -E?: The add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 * without holding struct_mutex, the object may become re-busied before this
 * function completes. A similar but shorter race condition exists in the busy
 * ioctl.
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
2758
	struct drm_i915_private *dev_priv = dev->dev_private;
2759 2760
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
2761
	struct intel_engine_cs *ring = NULL;
2762
	struct timespec timeout_stack, *timeout = NULL;
2763
	unsigned reset_counter;
2764 2765 2766
	u32 seqno = 0;
	int ret = 0;

2767 2768 2769 2770
	if (args->timeout_ns >= 0) {
		timeout_stack = ns_to_timespec(args->timeout_ns);
		timeout = &timeout_stack;
	}
2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
	if (&obj->base == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOENT;
	}

2782 2783
	/* Need to make sure the object gets inactive eventually. */
	ret = i915_gem_object_flush_active(obj);
2784 2785 2786 2787
	if (ret)
		goto out;

	if (obj->active) {
2788
		seqno = obj->last_read_seqno;
2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803
		ring = obj->ring;
	}

	if (seqno == 0)
		 goto out;

	/* Do this after OLR check to make sure we make forward progress polling
	 * on this IOCTL with a 0 timeout (like busy ioctl)
	 */
	if (!args->timeout_ns) {
		ret = -ETIME;
		goto out;
	}

	drm_gem_object_unreference(&obj->base);
2804
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2805 2806
	mutex_unlock(&dev->struct_mutex);

2807
	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
2808
	if (timeout)
2809
		args->timeout_ns = timespec_to_ns(timeout);
2810 2811 2812 2813 2814 2815 2816 2817
	return ret;

out:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
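
/*
 * Illustrative sketch (not part of the driver): userspace waits on a buffer
 * with a bounded budget roughly as follows (uapi names from i915_drm.h,
 * error handling elided):
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 100 * 1000 * 1000,
 *	};
 *
 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *
 * A zero return means the object went idle; a failure with errno == ETIME
 * means it is still busy, and wait.timeout_ns has been updated with the
 * unused remainder of the budget.
 */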

/**
 * i915_gem_object_sync - sync an object to a ring.
 *
 * @obj: object which may be in use on another ring.
 * @to: ring we wish to use the object on. May be NULL.
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Calling with NULL implies synchronizing the object with the CPU
 * rather than a particular GPU ring.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
2830 2831
int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
2832
		     struct intel_engine_cs *to)
2833
{
2834
	struct intel_engine_cs *from = obj->ring;
2835 2836 2837 2838 2839 2840
	u32 seqno;
	int ret, idx;

	if (from == NULL || to == from)
		return 0;

2841
	if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2842
		return i915_gem_object_wait_rendering(obj, false);
2843 2844 2845

	idx = intel_ring_sync_index(from, to);

2846
	seqno = obj->last_read_seqno;
2847
	if (seqno <= from->semaphore.sync_seqno[idx])
2848 2849
		return 0;

2850 2851 2852
	ret = i915_gem_check_olr(obj->ring, seqno);
	if (ret)
		return ret;
2853

2854
	trace_i915_gem_ring_sync_to(from, to, seqno);
2855
	ret = to->semaphore.sync_to(to, from, seqno);
2856
	if (!ret)
2857 2858 2859 2860
		/* We use last_read_seqno because sync_to()
		 * might have just caused seqno wrap under
		 * the radar.
		 */
2861
		from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
2862

2863
	return ret;
2864 2865
}
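
/*
 * Illustrative example: if a batch on the blitter ring needs an object last
 * written by the render ring at seqno 100, the call above is a no-op when
 * the blitter's semaphore.sync_seqno[] already records 100 or later;
 * otherwise it emits a semaphore wait (or falls back to a CPU wait when
 * semaphores are disabled or @to is NULL) before the blitter may proceed.
 */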

static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
{
	u32 old_write_domain, old_read_domains;

	/* Force a pagefault for domain tracking on next user access */
	i915_gem_release_mmap(obj);

2873 2874 2875
	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		return;

2876 2877 2878
	/* Wait for any direct GTT access to complete */
	mb();

2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889
	old_read_domains = obj->base.read_domains;
	old_write_domain = obj->base.write_domain;

	obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
	obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);
}

2890
int i915_vma_unbind(struct i915_vma *vma)
2891
{
2892
	struct drm_i915_gem_object *obj = vma->obj;
2893
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2894
	int ret;
2895

2896
	if (list_empty(&vma->vma_link))
2897 2898
		return 0;

2899 2900 2901 2902
	if (!drm_mm_node_allocated(&vma->node)) {
		i915_gem_vma_destroy(vma);
		return 0;
	}
2903

	if (vma->pin_count)
2905
		return -EBUSY;
2906

2907 2908
	BUG_ON(obj->pages == NULL);

2909
	ret = i915_gem_object_finish_gpu(obj);
2910
	if (ret)
2911 2912 2913 2914 2915 2916
		return ret;
	/* Continue on if we fail due to EIO, the GPU is hung so we
	 * should be safe and we need to cleanup or else we might
	 * cause memory corruption through use-after-free.
	 */

2917 2918
	if (i915_is_ggtt(vma->vm)) {
		i915_gem_object_finish_gtt(obj);
2919

2920 2921 2922 2923 2924
		/* release the fence reg _after_ flushing */
		ret = i915_gem_object_put_fence(obj);
		if (ret)
			return ret;
	}
2925

2926
	trace_i915_vma_unbind(vma);

2928 2929
	vma->unbind_vma(vma);

2930
	i915_gem_gtt_finish_object(obj);
2931

2932
	list_del_init(&vma->mm_list);
2933
	/* Avoid an unnecessary call to unbind on rebind. */
2934 2935
	if (i915_is_ggtt(vma->vm))
		obj->map_and_fenceable = true;
2936

	drm_mm_remove_node(&vma->node);
	i915_gem_vma_destroy(vma);

	/* Since the unbound list is global, only move to that list if
2941
	 * no more VMAs exist. */
	if (list_empty(&obj->vma_list))
		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2944

2945 2946 2947 2948 2949 2950
	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
	 * reaped by the shrinker.
	 */
	i915_gem_object_unpin_pages(obj);

2951
	return 0;
2952 2953
}

2954
int i915_gpu_idle(struct drm_device *dev)
2955
{
2956
	struct drm_i915_private *dev_priv = dev->dev_private;
2957
	struct intel_engine_cs *ring;
2958
	int ret, i;
2959 2960

	/* Flush everything onto the inactive list. */
2961
	for_each_ring(ring, dev_priv, i) {
2962
		ret = i915_switch_context(ring, ring->default_context);
2963 2964 2965
		if (ret)
			return ret;

2966
		ret = intel_ring_idle(ring);
2967 2968 2969
		if (ret)
			return ret;
	}
2970

2971
	return 0;
2972 2973
}

2974 2975
static void i965_write_fence_reg(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj)
2976
{
2977
	struct drm_i915_private *dev_priv = dev->dev_private;
2978 2979
	int fence_reg;
	int fence_pitch_shift;
2980

2981 2982 2983 2984 2985 2986 2987 2988
	if (INTEL_INFO(dev)->gen >= 6) {
		fence_reg = FENCE_REG_SANDYBRIDGE_0;
		fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
	} else {
		fence_reg = FENCE_REG_965_0;
		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
	}

2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002
	fence_reg += reg * 8;

	/* To w/a incoherency with non-atomic 64-bit register updates,
	 * we split the 64-bit update into two 32-bit writes. In order
	 * for a partial fence not to be evaluated between writes, we
	 * precede the update with write to turn off the fence register,
	 * and only enable the fence as the last step.
	 *
	 * For extra levels of paranoia, we make sure each step lands
	 * before applying the next step.
	 */
	I915_WRITE(fence_reg, 0);
	POSTING_READ(fence_reg);

3003
	if (obj) {
3004
		u32 size = i915_gem_obj_ggtt_size(obj);
3005
		uint64_t val;
3006

3007
		val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
3008
				 0xfffff000) << 32;
3009
		val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
3010
		val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
3011 3012 3013
		if (obj->tiling_mode == I915_TILING_Y)
			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
		val |= I965_FENCE_REG_VALID;
3014

3015 3016 3017 3018 3019 3020 3021 3022 3023
		I915_WRITE(fence_reg + 4, val >> 32);
		POSTING_READ(fence_reg + 4);

		I915_WRITE(fence_reg + 0, val);
		POSTING_READ(fence_reg);
	} else {
		I915_WRITE(fence_reg + 4, 0);
		POSTING_READ(fence_reg + 4);
	}
3024 3025
}
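
/*
 * Worked example (illustrative numbers only): for a 1MiB Y-tiled object at
 * GGTT offset 0x00200000 with a 512 byte stride, the function above builds
 *
 *	val = ((0x00200000 + 0x00100000 - 4096) & 0xfffff000) << 32
 *	    | (0x00200000 & 0xfffff000)
 *	    | ((512 / 128) - 1) << fence_pitch_shift
 *	    | (1 << I965_FENCE_TILING_Y_SHIFT) | I965_FENCE_REG_VALID;
 *
 * and writes the two 32-bit halves separately, with the fence disabled in
 * between, exactly as the w/a comment above requires.
 */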

static void i915_write_fence_reg(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj)
3028
{
3029
	struct drm_i915_private *dev_priv = dev->dev_private;
3030
	u32 val;
3031

3032
	if (obj) {
3033
		u32 size = i915_gem_obj_ggtt_size(obj);
3034 3035
		int pitch_val;
		int tile_width;
3036

3037
		WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
3038
		     (size & -size) != size ||
3039 3040 3041
		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
		     "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
		     i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
3042

3043 3044 3045 3046 3047 3048 3049 3050 3051
		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
			tile_width = 128;
		else
			tile_width = 512;

		/* Note: pitch better be a power of two tile widths */
		pitch_val = obj->stride / tile_width;
		pitch_val = ffs(pitch_val) - 1;

3052
		val = i915_gem_obj_ggtt_offset(obj);
3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067
		if (obj->tiling_mode == I915_TILING_Y)
			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
		val |= I915_FENCE_SIZE_BITS(size);
		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
		val |= I830_FENCE_REG_VALID;
	} else
		val = 0;

	if (reg < 8)
		reg = FENCE_REG_830_0 + reg * 4;
	else
		reg = FENCE_REG_945_8 + (reg - 8) * 4;

	I915_WRITE(reg, val);
	POSTING_READ(reg);
3068 3069
}

3070 3071
static void i830_write_fence_reg(struct drm_device *dev, int reg,
				struct drm_i915_gem_object *obj)
3072
{
3073
	struct drm_i915_private *dev_priv = dev->dev_private;
3074 3075
	uint32_t val;

3076
	if (obj) {
3077
		u32 size = i915_gem_obj_ggtt_size(obj);
3078
		uint32_t pitch_val;
3079

3080
		WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
3081
		     (size & -size) != size ||
3082 3083 3084
		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
		     "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
		     i915_gem_obj_ggtt_offset(obj), size);
3085

3086 3087
		pitch_val = obj->stride / 128;
		pitch_val = ffs(pitch_val) - 1;
3088

3089
		val = i915_gem_obj_ggtt_offset(obj);
3090 3091 3092 3093 3094 3095 3096
		if (obj->tiling_mode == I915_TILING_Y)
			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
		val |= I830_FENCE_SIZE_BITS(size);
		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
		val |= I830_FENCE_REG_VALID;
	} else
		val = 0;
3097

3098 3099 3100 3101
	I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
	POSTING_READ(FENCE_REG_830_0 + reg * 4);
}

3102 3103 3104 3105 3106
inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
{
	return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
}

3107 3108 3109
static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj)
{
3110 3111 3112 3113 3114 3115 3116 3117
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Ensure that all CPU reads are completed before installing a fence
	 * and all writes before removing the fence.
	 */
	if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
		mb();

3118 3119 3120 3121
	WARN(obj && (!obj->stride || !obj->tiling_mode),
	     "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
	     obj->stride, obj->tiling_mode);

3122
	switch (INTEL_INFO(dev)->gen) {
3123
	case 8:
3124
	case 7:
3125
	case 6:
3126 3127 3128 3129
	case 5:
	case 4: i965_write_fence_reg(dev, reg, obj); break;
	case 3: i915_write_fence_reg(dev, reg, obj); break;
	case 2: i830_write_fence_reg(dev, reg, obj); break;
3130
	default: BUG();
3131
	}
3132 3133 3134 3135 3136 3137

	/* And similarly be paranoid that no direct access to this region
	 * is reordered to before the fence is installed.
	 */
	if (i915_gem_object_needs_mb(obj))
		mb();
3138 3139
}

3140 3141 3142 3143 3144 3145 3146 3147 3148 3149
static inline int fence_number(struct drm_i915_private *dev_priv,
			       struct drm_i915_fence_reg *fence)
{
	return fence - dev_priv->fence_regs;
}

static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable)
{
3150
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3151 3152 3153
	int reg = fence_number(dev_priv, fence);

	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
3154 3155

	if (enable) {
3156
		obj->fence_reg = reg;
3157 3158 3159 3160 3161 3162 3163
		fence->obj = obj;
		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
	} else {
		obj->fence_reg = I915_FENCE_REG_NONE;
		fence->obj = NULL;
		list_del_init(&fence->lru_list);
	}
3164
	obj->fence_dirty = false;
3165 3166
}

3167
static int
3168
i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
3169
{
3170
	if (obj->last_fenced_seqno) {
3171
		int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
3172 3173
		if (ret)
			return ret;
3174 3175 3176 3177

		obj->last_fenced_seqno = 0;
	}

3178
	obj->fenced_gpu_access = false;
3179 3180 3181 3182 3183 3184
	return 0;
}

int
i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
{
3185
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3186
	struct drm_i915_fence_reg *fence;
3187 3188
	int ret;

3189
	ret = i915_gem_object_wait_fence(obj);
3190 3191 3192
	if (ret)
		return ret;

3193 3194
	if (obj->fence_reg == I915_FENCE_REG_NONE)
		return 0;
3195

3196 3197
	fence = &dev_priv->fence_regs[obj->fence_reg];

3198 3199 3200
	if (WARN_ON(fence->pin_count))
		return -EBUSY;

3201
	i915_gem_object_fence_lost(obj);
3202
	i915_gem_object_update_fence(obj, fence, false);
3203 3204 3205 3206 3207

	return 0;
}

static struct drm_i915_fence_reg *
i915_find_fence_reg(struct drm_device *dev)
3209 3210
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_fence_reg *reg, *avail;
3212
	int i;
3213 3214

	/* First try to find a free reg */
3215
	avail = NULL;
3216 3217 3218
	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
		reg = &dev_priv->fence_regs[i];
		if (!reg->obj)
3219
			return reg;
3220

3221
		if (!reg->pin_count)
3222
			avail = reg;
3223 3224
	}

3225
	if (avail == NULL)
3226
		goto deadlock;
3227 3228

	/* None available, try to steal one or wait for a user to finish */
3229
	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3230
		if (reg->pin_count)
3231 3232
			continue;

		return reg;
3234 3235
	}

3236 3237 3238 3239 3240 3241
deadlock:
	/* Wait for completion of pending flips which consume fences */
	if (intel_has_pending_fb_unpin(dev))
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-EDEADLK);
3242 3243
}

3244
/**
3245
 * i915_gem_object_get_fence - set up fencing for an object
3246 3247 3248 3249 3250 3251 3252 3253 3254
 * @obj: object to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
3255 3256
 *
 * For an untiled surface, this removes any existing fence.
3257
 */
3258
int
3259
i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3260
{
3261
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
3263
	bool enable = obj->tiling_mode != I915_TILING_NONE;
3264
	struct drm_i915_fence_reg *reg;
3265
	int ret;
3266

3267 3268 3269
	/* Have we updated the tiling parameters upon the object and so
	 * will need to serialise the write to the associated fence register?
	 */
3270
	if (obj->fence_dirty) {
3271
		ret = i915_gem_object_wait_fence(obj);
3272 3273 3274
		if (ret)
			return ret;
	}
3275

3276
	/* Just update our place in the LRU if our fence is getting reused. */
3277 3278
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		reg = &dev_priv->fence_regs[obj->fence_reg];
3279
		if (!obj->fence_dirty) {
3280 3281 3282 3283 3284 3285
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
			return 0;
		}
	} else if (enable) {
		reg = i915_find_fence_reg(dev);
3286 3287
		if (IS_ERR(reg))
			return PTR_ERR(reg);
3288

3289 3290 3291
		if (reg->obj) {
			struct drm_i915_gem_object *old = reg->obj;

3292
			ret = i915_gem_object_wait_fence(old);
3293 3294 3295
			if (ret)
				return ret;

3296
			i915_gem_object_fence_lost(old);
3297
		}
3298
	} else
3299 3300
		return 0;

3301 3302
	i915_gem_object_update_fence(obj, reg, enable);

3303
	return 0;
3304 3305
}

static bool i915_gem_valid_gtt_space(struct drm_device *dev,
				     struct drm_mm_node *gtt_space,
				     unsigned long cache_level)
{
	struct drm_mm_node *other;

	/* On non-LLC machines we have to be careful when putting differing
	 * types of snoopable memory together to avoid the prefetcher
3314
	 * crossing memory domains and dying.
3315 3316 3317 3318
	 */
	if (HAS_LLC(dev))
		return true;

3319
	if (!drm_mm_node_allocated(gtt_space))
3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342
		return true;

	if (list_empty(&gtt_space->node_list))
		return true;

	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
	if (other->allocated && !other->hole_follows && other->color != cache_level)
		return false;

	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
		return false;

	return true;
}

static void i915_gem_verify_gtt(struct drm_device *dev)
{
#if WATCH_GTT
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int err = 0;

3343
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
3344 3345 3346 3347 3348 3349 3350 3351
		if (obj->gtt_space == NULL) {
			printk(KERN_ERR "object found on GTT list with no space reserved\n");
			err++;
			continue;
		}

		if (obj->cache_level != obj->gtt_space->color) {
			printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
3352 3353
			       i915_gem_obj_ggtt_offset(obj),
			       i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3354 3355 3356 3357 3358 3359 3360 3361 3362 3363
			       obj->cache_level,
			       obj->gtt_space->color);
			err++;
			continue;
		}

		if (!i915_gem_valid_gtt_space(dev,
					      obj->gtt_space,
					      obj->cache_level)) {
			printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
3364 3365
			       i915_gem_obj_ggtt_offset(obj),
			       i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3366 3367 3368 3369 3370 3371 3372 3373 3374 3375
			       obj->cache_level);
			err++;
			continue;
		}
	}

	WARN_ON(err);
#endif
}

3376 3377 3378
/**
 * Finds free space in the GTT aperture and binds the object there.
 */
3379
static struct i915_vma *
3380 3381 3382
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
			   struct i915_address_space *vm,
			   unsigned alignment,
3383
			   uint64_t flags)
3384
{
3385
	struct drm_device *dev = obj->base.dev;
3386
	struct drm_i915_private *dev_priv = dev->dev_private;
3387
	u32 size, fence_size, fence_alignment, unfenced_alignment;
3388 3389 3390
	unsigned long start =
		flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	unsigned long end =
3391
		flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
	struct i915_vma *vma;
3393
	int ret;
3394

3395 3396 3397 3398 3399
	fence_size = i915_gem_get_gtt_size(dev,
					   obj->base.size,
					   obj->tiling_mode);
	fence_alignment = i915_gem_get_gtt_alignment(dev,
						     obj->base.size,
3400
						     obj->tiling_mode, true);
3401
	unfenced_alignment =
3402
		i915_gem_get_gtt_alignment(dev,
3403 3404
					   obj->base.size,
					   obj->tiling_mode, false);
3405

3406
	if (alignment == 0)
3407
		alignment = flags & PIN_MAPPABLE ? fence_alignment :
3408
						unfenced_alignment;
3409
	if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
3410
		DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
3411
		return ERR_PTR(-EINVAL);
3412 3413
	}

3414
	size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
3415

3416 3417 3418
	/* If the object is bigger than the entire aperture, reject it early
	 * before evicting everything in a vain attempt to find space.
	 */
3419 3420
	if (obj->base.size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
3421
			  obj->base.size,
3422
			  flags & PIN_MAPPABLE ? "mappable" : "total",
3423
			  end);
3424
		return ERR_PTR(-E2BIG);
3425 3426
	}

3427
	ret = i915_gem_object_get_pages(obj);
	if (ret)
3429
		return ERR_PTR(ret);

3431 3432
	i915_gem_object_pin_pages(obj);

3433
	vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
3434
	if (IS_ERR(vma))
3435
		goto err_unpin;

3437
search_free:
3438
	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3439
						  size, alignment,
3440 3441
						  obj->cache_level,
						  start, end,
3442 3443
						  DRM_MM_SEARCH_DEFAULT,
						  DRM_MM_CREATE_DEFAULT);
3444
	if (ret) {
3445
		ret = i915_gem_evict_something(dev, vm, size, alignment,
3446 3447 3448
					       obj->cache_level,
					       start, end,
					       flags);
3449 3450
		if (ret == 0)
			goto search_free;
3451

3452
		goto err_free_vma;
3453
	}
	if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
3455
					      obj->cache_level))) {
		ret = -EINVAL;
3457
		goto err_remove_node;
3458 3459
	}

3460
	ret = i915_gem_gtt_prepare_object(obj);
	if (ret)
3462
		goto err_remove_node;
3463

3464
	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
	list_add_tail(&vma->mm_list, &vm->inactive_list);
3466

3467 3468
	if (i915_is_ggtt(vm)) {
		bool mappable, fenceable;
3469

3470 3471
		fenceable = (vma->node.size == fence_size &&
			     (vma->node.start & (fence_alignment - 1)) == 0);
3472

3473 3474
		mappable = (vma->node.start + obj->base.size <=
			    dev_priv->gtt.mappable_end);
3475

3476
		obj->map_and_fenceable = mappable && fenceable;
3477
	}
3478

3479
	WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
3480

3481
	trace_i915_vma_bind(vma, flags);
3482 3483 3484
	vma->bind_vma(vma, obj->cache_level,
		      flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);

3485
	i915_gem_verify_gtt(dev);
3486
	return vma;

3488
err_remove_node:
3489
	drm_mm_remove_node(&vma->node);
3490
err_free_vma:
	i915_gem_vma_destroy(vma);
3492
	vma = ERR_PTR(ret);
3493
err_unpin:
	i915_gem_object_unpin_pages(obj);
3495
	return vma;
3496 3497
}

3498
bool
3499 3500
i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			bool force)
3501 3502 3503 3504 3505
{
	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
3506
	if (obj->pages == NULL)
3507
		return false;
3508

3509 3510 3511 3512 3513
	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 */
	if (obj->stolen)
3514
		return false;
3515

3516 3517 3518 3519 3520 3521 3522 3523
	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines.  However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated.  As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
3524
	if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
3525
		return false;
3526

	trace_i915_gem_object_clflush(obj);
3528
	drm_clflush_sg(obj->pages);
3529 3530

	return true;
3531 3532 3533 3534
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
3535
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3536
{
	uint32_t old_write_domain;

3539
	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3540 3541
		return;

3542
	/* No actual flushing is required for the GTT write domain.  Writes
3543 3544
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush.  It also doesn't land in render cache.
3545 3546 3547 3548
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
3549
	 */
3550 3551
	wmb();

3552 3553
	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
3556
					    obj->base.read_domains,
					    old_write_domain);
3558 3559 3560 3561
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
3562 3563
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
				       bool force)
3564
{
	uint32_t old_write_domain;
3566

3567
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3568 3569
		return;

3570 3571 3572
	if (i915_gem_clflush_object(obj, force))
		i915_gem_chipset_flush(obj->base.dev);

3573 3574
	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
3577
					    obj->base.read_domains,
					    old_write_domain);
3579 3580
}

3581 3582 3583 3584 3585 3586
/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
3588
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3589
{
3590
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	uint32_t old_write_domain, old_read_domains;
3592
	int ret;
3593

3594
	/* Not valid to be called on unbound objects. */
3595
	if (!i915_gem_obj_bound_any(obj))
3596 3597
		return -EINVAL;

3598 3599 3600
	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
		return 0;

3601
	ret = i915_gem_object_wait_rendering(obj, !write);
3602 3603 3604
	if (ret)
		return ret;

3605
	i915_gem_object_retire(obj);
3606
	i915_gem_object_flush_cpu_write_domain(obj, false);

3608 3609 3610 3611 3612 3613 3614
	/* Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * GTT domain upon first access.
	 */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		mb();

3615 3616
	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

3618 3619 3620
	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
3621 3622
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3623
	if (write) {
3624 3625 3626
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
		obj->dirty = 1;
3627 3628
	}

C
Chris Wilson 已提交
3629 3630 3631 3632
	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

3633
	/* And bump the LRU for this access */
B
Ben Widawsky 已提交
3634
	if (i915_gem_object_is_inactive(obj)) {
3635
		struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
B
Ben Widawsky 已提交
3636 3637 3638 3639 3640
		if (vma)
			list_move_tail(&vma->mm_list,
				       &dev_priv->gtt.base.inactive_list);

	}
3641

3642 3643 3644
	return 0;
}

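/*
 * Changes the cache level of an object across all of its VMAs. Any VMA whose
 * current GTT placement cannot coexist with the requested cache level is
 * unbound first, and remaining bound VMAs are rebound with updated PTEs.
 */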
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct i915_vma *vma, *next;
	int ret;

	if (obj->cache_level == cache_level)
		return 0;

	if (i915_gem_obj_is_pinned(obj)) {
		DRM_DEBUG("can not change the cache level of pinned objects\n");
		return -EBUSY;
	}

	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
		if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
			ret = i915_vma_unbind(vma);
			if (ret)
				return ret;
		}
	}

	if (i915_gem_obj_bound_any(obj)) {
		ret = i915_gem_object_finish_gpu(obj);
		if (ret)
			return ret;

		i915_gem_object_finish_gtt(obj);

		/* Before SandyBridge, you could not use tiling or fence
		 * registers with snooped memory, so relinquish any fences
		 * currently pointing to our region in the aperture.
		 */
		if (INTEL_INFO(dev)->gen < 6) {
			ret = i915_gem_object_put_fence(obj);
			if (ret)
				return ret;
		}

		list_for_each_entry(vma, &obj->vma_list, vma_link)
			if (drm_mm_node_allocated(&vma->node))
				vma->bind_vma(vma, cache_level,
					      obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
	}

	list_for_each_entry(vma, &obj->vma_list, vma_link)
		vma->node.color = cache_level;
	obj->cache_level = cache_level;

	if (cpu_write_needs_clflush(obj)) {
		u32 old_read_domains, old_write_domain;

		/* If we're coming from LLC cached, then we haven't
		 * actually been tracking whether the data is in the
		 * CPU cache or not, since we only allow one bit set
		 * in obj->write_domain and have been skipping the clflushes.
		 * Just set it to the CPU cache for now.
		 */
		i915_gem_object_retire(obj);
		WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);

		old_read_domains = obj->base.read_domains;
		old_write_domain = obj->base.write_domain;

		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;

		trace_i915_gem_object_change_domain(obj,
						    old_read_domains,
						    old_write_domain);
	}

	i915_gem_verify_gtt(dev);
	return 0;
}

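/* Report the current caching mode of a GEM object back to userspace. */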
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	switch (obj->cache_level) {
	case I915_CACHE_LLC:
	case I915_CACHE_L3_LLC:
		args->caching = I915_CACHING_CACHED;
		break;

	case I915_CACHE_WT:
		args->caching = I915_CACHING_DISPLAY;
		break;

	default:
		args->caching = I915_CACHING_NONE;
		break;
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

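/* Set the caching mode of a GEM object as requested by userspace. */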
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret;

	switch (args->caching) {
	case I915_CACHING_NONE:
		level = I915_CACHE_NONE;
		break;
	case I915_CACHING_CACHED:
		level = I915_CACHE_LLC;
		break;
	case I915_CACHING_DISPLAY:
		level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
		break;
	default:
		return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = i915_gem_object_set_cache_level(obj, level);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static bool is_pin_display(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	if (list_empty(&obj->vma_list))
		return false;

	vma = i915_gem_obj_to_ggtt(obj);
	if (!vma)
		return false;

	/* There are 3 sources that pin objects:
	 *   1. The display engine (scanouts, sprites, cursors);
	 *   2. Reservations for execbuffer;
	 *   3. The user.
	 *
	 * We can ignore reservations as we hold the struct_mutex and
	 * are only called outside of the reservation path.  The user
	 * can only increment pin_count once, and so if after
	 * subtracting the potential reference by the user, any pin_count
	 * remains, it must be due to another use by the display engine.
	 */
	return vma->pin_count - !!obj->user_pin_count;
}

/*
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
 */
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_engine_cs *pipelined)
{
	u32 old_read_domains, old_write_domain;
	bool was_pin_display;
	int ret;

	if (pipelined != obj->ring) {
		ret = i915_gem_object_sync(obj, pipelined);
		if (ret)
			return ret;
	}

	/* Mark the pin_display early so that we account for the
	 * display coherency whilst setting up the cache domains.
	 */
	was_pin_display = obj->pin_display;
	obj->pin_display = true;

	/* The display engine is not coherent with the LLC cache on gen6.  As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is lowest common denominator for all
	 * chipsets.
	 *
	 * However for gen6+, we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
	ret = i915_gem_object_set_cache_level(obj,
					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
	if (ret)
		goto err_unpin_display;

	/* As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
	 * always use map_and_fenceable for all scanout buffers.
	 */
	ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
	if (ret)
		goto err_unpin_display;

	i915_gem_object_flush_cpu_write_domain(obj, true);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	obj->base.write_domain = 0;
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;

err_unpin_display:
	WARN_ON(was_pin_display != is_pin_display(obj));
	obj->pin_display = was_pin_display;
	return ret;
}

void
i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
{
	i915_gem_object_ggtt_unpin(obj);
	obj->pin_display = is_pin_display(obj);
}

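/* Wait for outstanding rendering and drop the object's GPU read domains. */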
int
i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
{
	int ret;

	if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	/* Ensure that we invalidate the GPU's caches and TLBs. */
	obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
	return 0;
}

/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	i915_gem_object_retire(obj);
	i915_gem_object_flush_gtt_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj, false);

		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
	struct drm_i915_gem_request *request;
	struct intel_engine_cs *ring = NULL;
	unsigned reset_counter;
	u32 seqno = 0;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
	if (ret)
		return ret;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		ring = request->ring;
		seqno = request->seqno;
	}
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	spin_unlock(&file_priv->mm.lock);

	if (seqno == 0)
		return 0;

	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
	if (ret == 0)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);

	return ret;
}

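/*
 * Check whether an already-bound VMA violates the alignment or placement
 * constraints (PIN_MAPPABLE, PIN_OFFSET_BIAS) of a new pin request.
 */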
static bool
i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
{
	struct drm_i915_gem_object *obj = vma->obj;

	if (alignment &&
	    vma->node.start & (alignment - 1))
		return true;

	if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

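/*
 * Pin an object into the given address space, binding it (or rebinding it
 * if it is misplaced for the requested alignment/flags) and bumping the
 * VMA pin count.
 */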
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
		    struct i915_address_space *vm,
		    uint32_t alignment,
		    uint64_t flags)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct i915_vma *vma;
	int ret;

	if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
		return -ENODEV;

	if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
		return -EINVAL;

	vma = i915_gem_obj_to_vma(obj, vm);
	if (vma) {
		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
			return -EBUSY;

		if (i915_vma_misplaced(vma, alignment, flags)) {
			WARN(vma->pin_count,
			     "bo is already pinned with incorrect alignment:"
			     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
			     " obj->map_and_fenceable=%d\n",
			     i915_gem_obj_offset(obj, vm), alignment,
			     !!(flags & PIN_MAPPABLE),
			     obj->map_and_fenceable);
			ret = i915_vma_unbind(vma);
			if (ret)
				return ret;

			vma = NULL;
		}
	}

	if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
		vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
		if (IS_ERR(vma))
			return PTR_ERR(vma);
	}

	if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
		vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);

	vma->pin_count++;
	if (flags & PIN_MAPPABLE)
		obj->pin_mappable |= true;

	return 0;
}

void
i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);

	BUG_ON(!vma);
	BUG_ON(vma->pin_count == 0);
	BUG_ON(!i915_gem_obj_ggtt_bound(obj));

	if (--vma->pin_count == 0)
		obj->pin_mappable = false;
}

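/*
 * Pin the fence register backing this object, if any, so it cannot be
 * stolen while the hardware is using it. Returns true if a fence was pinned.
 */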
bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);

		WARN_ON(!ggtt_vma ||
			dev_priv->fence_regs[obj->fence_reg].pin_count >
			ggtt_vma->pin_count);
		dev_priv->fence_regs[obj->fence_reg].pin_count++;
		return true;
	} else
		return false;
}

void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
		dev_priv->fence_regs[obj->fence_reg].pin_count--;
	}
}

int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (INTEL_INFO(dev)->gen >= 6)
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to pin a purgeable buffer\n");
		ret = -EFAULT;
		goto out;
	}

	if (obj->pin_filp != NULL && obj->pin_filp != file) {
		DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}

	if (obj->user_pin_count == ULONG_MAX) {
		ret = -EBUSY;
		goto out;
	}

	if (obj->user_pin_count == 0) {
		ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
		if (ret)
			goto out;
	}

	obj->user_pin_count++;
	obj->pin_filp = file;

	args->offset = i915_gem_obj_ggtt_offset(obj);
out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->pin_filp != file) {
		DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}
	obj->user_pin_count--;
	if (obj->user_pin_count == 0) {
		obj->pin_filp = NULL;
		i915_gem_object_ggtt_unpin(obj);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	intel_edp_psr_exit(dev, true);

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Count all active objects as busy, even if they are currently not used
	 * by the gpu. Users of this interface expect objects to eventually
	 * become non-busy without any further actions, therefore emit any
	 * necessary flushes here.
	 */
	ret = i915_gem_object_flush_active(obj);

	args->busy = obj->active;
	if (obj->ring) {
		BUILD_BUG_ON(I915_NUM_RINGS > 16);
		args->busy |= intel_ring_flag(obj->ring) << 16;
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}

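/*
 * Let userspace mark an object's backing storage as DONTNEED/WILLNEED so
 * that purgeable buffers can be discarded under memory pressure.
 */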
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
	    break;
	default:
	    return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (i915_gem_obj_is_pinned(obj)) {
		ret = -EINVAL;
		goto out;
	}

	if (obj->madv != __I915_MADV_PURGED)
		obj->madv = args->madv;

	/* if the object is no longer attached, discard its backing storage */
	if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj->madv != __I915_MADV_PURGED;

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
{
	INIT_LIST_HEAD(&obj->global_list);
	INIT_LIST_HEAD(&obj->ring_list);
	INIT_LIST_HEAD(&obj->obj_exec_link);
	INIT_LIST_HEAD(&obj->vma_list);

	obj->ops = ops;

	obj->fence_reg = I915_FENCE_REG_NONE;
	obj->madv = I915_MADV_WILLNEED;
	/* Avoid an unnecessary call to unbind on the first bind. */
	obj->map_and_fenceable = true;

	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
}

static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,
};

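/* Allocate a new GEM object backed by shmemfs and set up default domains. */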
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size)
{
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	gfp_t mask;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		i915_gem_object_free(obj);
		return NULL;
	}

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = file_inode(obj->base.filp)->i_mapping;
	mapping_set_gfp_mask(mapping, mask);

	i915_gem_object_init(obj, &i915_gem_object_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(dev)) {
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached.  Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache.  This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		obj->cache_level = I915_CACHE_LLC;
	} else
		obj->cache_level = I915_CACHE_NONE;

	trace_i915_gem_object_create(obj);

	return obj;
}

static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
	/* If we are the last user of the backing storage (be it shmemfs
	 * pages or stolen etc), we know that the pages are going to be
	 * immediately released. In this case, we can then skip copying
	 * back the contents from the GPU.
	 */

	if (obj->madv != I915_MADV_WILLNEED)
		return false;

	if (obj->base.filp == NULL)
		return true;

	/* At first glance, this looks racy, but then again so would be
	 * userspace racing mmap against close. However, the first external
	 * reference to the filp can only be obtained through the
	 * i915_gem_mmap_ioctl() which safeguards us against the user
	 * acquiring such a reference whilst we are in the middle of
	 * freeing the object.
	 */
	return atomic_long_read(&obj->base.filp->f_count) == 1;
}

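/*
 * Final unreference of a GEM object: unbind all VMAs, release backing
 * pages and the mmap offset, then free the object itself.
 */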
void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_vma *vma, *next;

	intel_runtime_pm_get(dev_priv);

	trace_i915_gem_object_destroy(obj);

	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
		int ret;

		vma->pin_count = 0;
		ret = i915_vma_unbind(vma);
		if (WARN_ON(ret == -ERESTARTSYS)) {
			bool was_interruptible;

			was_interruptible = dev_priv->mm.interruptible;
			dev_priv->mm.interruptible = false;

			WARN_ON(i915_vma_unbind(vma));

			dev_priv->mm.interruptible = was_interruptible;
		}
	}

	i915_gem_object_detach_phys(obj);

	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
	 * before progressing. */
	if (obj->stolen)
		i915_gem_object_unpin_pages(obj);

	if (WARN_ON(obj->pages_pin_count))
		obj->pages_pin_count = 0;
	if (discard_backing_storage(obj))
		obj->madv = I915_MADV_DONTNEED;
	i915_gem_object_put_pages(obj);
	i915_gem_object_free_mmap_offset(obj);

	BUG_ON(obj->pages);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	if (obj->ops->release)
		obj->ops->release(obj);

	drm_gem_object_release(&obj->base);
	i915_gem_info_remove_obj(dev_priv, obj->base.size);

	kfree(obj->bit_17);
	i915_gem_object_free(obj);

	intel_runtime_pm_put(dev_priv);
}

struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm)
{
	struct i915_vma *vma;
	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->vm == vm)
			return vma;

	return NULL;
}

void i915_gem_vma_destroy(struct i915_vma *vma)
{
	WARN_ON(vma->node.allocated);

	/* Keep the vma as a placeholder in the execbuffer reservation lists */
	if (!list_empty(&vma->exec_list))
		return;

	list_del(&vma->vma_link);

	kfree(vma);
}

static void
i915_gem_stop_ringbuffers(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		intel_stop_ring_buffer(ring);
}

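/* Idle the GPU and quiesce GEM housekeeping work before suspend. */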
int
i915_gem_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->ums.mm_suspended)
		goto err;

	ret = i915_gpu_idle(dev);
	if (ret)
		goto err;

	i915_gem_retire_requests(dev);

	/* Under UMS, be paranoid and evict. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_gem_evict_everything(dev);

	i915_kernel_lost_context(dev);
	i915_gem_stop_ringbuffers(dev);

	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 * And not confound ums.mm_suspended!
	 */
	dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
							     DRIVER_MODESET);
	mutex_unlock(&dev->struct_mutex);

	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
	cancel_delayed_work_sync(&dev_priv->mm.idle_work);

	return 0;

err:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

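/* Re-apply the saved L3 remapping registers for one slice via the ring. */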
int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
	int i, ret;

	if (!HAS_L3_DPF(dev) || !remap_info)
		return 0;

	ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
	if (ret)
		return ret;

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, reg_base + i);
		intel_ring_emit(ring, remap_info[i/4]);
	}

	intel_ring_advance(ring);

	return ret;
}

void i915_gem_init_swizzling(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN5(dev))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN6(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN7(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else if (IS_GEN8(dev))
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
	else
		BUG();
}

static bool
intel_enable_blt(struct drm_device *dev)
{
	if (!HAS_BLT(dev))
		return false;

	/* The blitter was dysfunctional on early prototypes */
	if (IS_GEN6(dev) && dev->pdev->revision < 8) {
		DRM_INFO("BLT not supported on this pre-production hardware;"
			 " graphics performance will be degraded.\n");
		return false;
	}

	return true;
}

static int i915_gem_init_rings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_init_render_ring_buffer(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = intel_init_bsd_ring_buffer(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (intel_enable_blt(dev)) {
		ret = intel_init_blt_ring_buffer(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	if (HAS_VEBOX(dev)) {
		ret = intel_init_vebox_ring_buffer(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	if (HAS_BSD2(dev)) {
		ret = intel_init_bsd2_ring_buffer(dev);
		if (ret)
			goto cleanup_vebox_ring;
	}

	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
	if (ret)
		goto cleanup_bsd2_ring;

	return 0;

cleanup_bsd2_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
cleanup_blt_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);

	return ret;
}

int
i915_gem_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret, i;

	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
		return -EIO;

	if (dev_priv->ellc_size)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (IS_HASWELL(dev))
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	if (HAS_PCH_NOP(dev)) {
		if (IS_IVYBRIDGE(dev)) {
			u32 temp = I915_READ(GEN7_MSG_CTL);
			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
			I915_WRITE(GEN7_MSG_CTL, temp);
		} else if (INTEL_INFO(dev)->gen >= 7) {
			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
		}
	}

	i915_gem_init_swizzling(dev);

	ret = i915_gem_init_rings(dev);
	if (ret)
		return ret;

	for (i = 0; i < NUM_L3_SLICES(dev); i++)
		i915_gem_l3_remap(&dev_priv->ring[RCS], i);

	/*
	 * XXX: Contexts should only be initialized once. Doing a switch to the
	 * default context switch however is something we'd like to do after
	 * reset or thaw (the latter may not actually be necessary for HW, but
	 * goes with our code better). Context switching requires rings (for
	 * the do_switch), but before enabling PPGTT. So don't move this.
	 */
	ret = i915_gem_context_enable(dev_priv);
	if (ret && ret != -EIO) {
		DRM_ERROR("Context enable failed %d\n", ret);
		i915_gem_cleanup_ringbuffer(dev);
	}

	return ret;
}

int i915_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev->struct_mutex);

	if (IS_VALLEYVIEW(dev)) {
		/* VLVA0 (potential hack), BIOS isn't actually waking us */
		I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
		if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
			      VLV_GTLC_ALLOWWAKEACK), 10))
			DRM_DEBUG_DRIVER("allow wake ack timed out\n");
	}

	i915_gem_init_userptr(dev);
	i915_gem_init_global_gtt(dev);

	ret = i915_gem_context_init(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	ret = i915_gem_init_hw(dev);
	if (ret == -EIO) {
		/* Allow ring initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this where the GPU is angry,
		 * for all other failure, such as an allocation failure, bail.
		 */
		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
		atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
		ret = 0;
	}
	mutex_unlock(&dev->struct_mutex);

	/* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->dri1.allow_batchbuffer = 1;
	return ret;
}

void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		intel_cleanup_ring_buffer(ring);
}

int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		atomic_set(&dev_priv->gpu_error.reset_counter, 0);
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->ums.mm_suspended = 0;

	ret = i915_gem_init_hw(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));

	ret = drm_irq_install(dev, dev->pdev->irq);
	if (ret)
		goto cleanup_ringbuffer;
	mutex_unlock(&dev->struct_mutex);

	return 0;

cleanup_ringbuffer:
	i915_gem_cleanup_ringbuffer(dev);
	dev_priv->ums.mm_suspended = 1;
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	mutex_lock(&dev->struct_mutex);
	drm_irq_uninstall(dev);
	mutex_unlock(&dev->struct_mutex);

	return i915_gem_suspend(dev);
}

void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_suspend(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}

static void
init_ring_lists(struct intel_engine_cs *ring)
{
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
}

void i915_init_vm(struct drm_i915_private *dev_priv,
		  struct i915_address_space *vm)
{
	if (!i915_is_ggtt(vm))
		drm_mm_init(&vm->mm, vm->start, vm->total);
	vm->dev = dev_priv->dev;
	INIT_LIST_HEAD(&vm->active_list);
	INIT_LIST_HEAD(&vm->inactive_list);
	INIT_LIST_HEAD(&vm->global_link);
	list_add_tail(&vm->global_link, &dev_priv->vm_list);
}

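/* One-time GEM bookkeeping setup at driver load (lists, workers, shrinker). */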
void
i915_gem_load(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	dev_priv->slab =
		kmem_cache_create("i915_gem_object",
				  sizeof(struct drm_i915_gem_object), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	INIT_LIST_HEAD(&dev_priv->vm_list);
	i915_init_vm(dev_priv, &dev_priv->gtt.base);

	INIT_LIST_HEAD(&dev_priv->context_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	for (i = 0; i < I915_NUM_RINGS; i++)
		init_ring_lists(&dev_priv->ring[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
			  i915_gem_idle_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
		I915_WRITE(MI_ARB_STATE,
			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
	}

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	i915_gem_restore_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&dev_priv->mm.shrinker);

	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	register_oom_notifier(&dev_priv->mm.oom_notifier);
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	cancel_delayed_work_sync(&file_priv->mm.idle_work);

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);

static void
i915_gem_file_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_file_private *file_priv =
		container_of(work, typeof(*file_priv), mm.idle_work.work);

	atomic_set(&file_priv->rps_wait_boost, false);
}

int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = dev->dev_private;
	file_priv->file = file;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);
	INIT_DELAYED_WORK(&file_priv->mm.idle_work,
			  i915_gem_file_idle_work_handler);

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}

static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return false;

		if (to_i915(dev)->mm.shrinker_no_lock_stealing)
			return false;

		*unlock = false;
	} else
		*unlock = true;

	return true;
}

static int num_vma_bound(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int count = 0;

	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (drm_mm_node_allocated(&vma->node))
			count++;

	return count;
}

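/*
 * Shrinker "count" callback: report how many pages could potentially be
 * reclaimed from unpinned, unbound (or idly bound) objects.
 */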
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj;
	unsigned long count;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return 0;

	count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
		if (obj->pages_pin_count == 0)
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!i915_gem_obj_is_pinned(obj) &&
		    obj->pages_pin_count == num_vma_bound(obj))
			count += obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}

/* All the new VM stuff */
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
				  struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (!dev_priv->mm.aliasing_ppgtt ||
	    vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	BUG_ON(list_empty(&o->vma_list));
	list_for_each_entry(vma, &o->vma_list, vma_link) {
		if (vma->vm == vm)
			return vma->node.start;

	}
	return -1;
}

bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (!dev_priv->mm.aliasing_ppgtt ||
	    vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	BUG_ON(list_empty(&o->vma_list));

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm)
			return vma->node.size;

	return 0;
}

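/*
 * Shrinker "scan" callback: purge and, if necessary, shrink bound objects
 * to try to free up to sc->nr_to_scan pages.
 */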
static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = dev_priv->dev;
	unsigned long freed;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return SHRINK_STOP;

	freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
	if (freed < sc->nr_to_scan)
		freed += __i915_gem_shrink(dev_priv,
					   sc->nr_to_scan - freed,
					   false);
	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return freed;
}

static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj;
	unsigned long timeout = msecs_to_jiffies(5000) + 1;
	unsigned long pinned, bound, unbound, freed;
	bool was_interruptible;
	bool unlock;

	while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout)
		schedule_timeout_killable(1);
	if (timeout == 0) {
		pr_err("Unable to purge GPU memory due to lock contention.\n");
		return NOTIFY_DONE;
	}

	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;

	freed = i915_gem_shrink_all(dev_priv);

	dev_priv->mm.interruptible = was_interruptible;

	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	unbound = bound = pinned = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (!obj->base.filp) /* not backed by a freeable object */
			continue;

		if (obj->pages_pin_count)
			pinned += obj->base.size;
		else
			unbound += obj->base.size;
	}
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!obj->base.filp)
			continue;

		if (obj->pages_pin_count)
			pinned += obj->base.size;
		else
			bound += obj->base.size;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
		freed, pinned);
	if (unbound || bound)
		pr_err("%lu and %lu bytes still available in the "
		       "bound and unbound GPU page lists.\n",
		       bound, unbound);

	*(unsigned long *)ptr += freed;
	return NOTIFY_DONE;
}

struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	/* This WARN has probably outlived its usefulness (callers already
	 * WARN if they don't find the GGTT vma they expect). When removing,
	 * remember to remove the pre-check in is_pin_display() as well */
	if (WARN_ON(list_empty(&obj->vma_list)))
		return NULL;

	vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
	if (vma->vm != obj_to_ggtt(obj))
		return NULL;

	return vma;
}