i915_gem.c 112.5 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

28 29
#include <drm/drmP.h>
#include <drm/i915_drm.h>
30
#include "i915_drv.h"
C
Chris Wilson 已提交
31
#include "i915_trace.h"
32
#include "intel_drv.h"
33
#include <linux/shmem_fs.h>
34
#include <linux/slab.h>
35
#include <linux/swap.h>
J
Jesse Barnes 已提交
36
#include <linux/pci.h>
37
#include <linux/dma-buf.h>
38

39 40
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
41 42
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
						    unsigned alignment,
43 44
						    bool map_and_fenceable,
						    bool nonblocking);
45 46
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
47
				struct drm_i915_gem_pwrite *args,
48
				struct drm_file *file);
49

50 51 52 53 54 55
static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

56
static int i915_gem_inactive_shrink(struct shrinker *shrinker,
57
				    struct shrink_control *sc);
C
Chris Wilson 已提交
58 59
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
60
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
61

62 63 64 65 66 67 68 69
static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
70
	obj->fence_dirty = false;
71 72 73
	obj->fence_reg = I915_FENCE_REG_NONE;
}

74 75 76 77 78 79 80 81 82 83 84 85 86 87 88
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
}

89
static int
90
i915_gem_wait_for_error(struct i915_gpu_error *error)
91 92 93
{
	int ret;

94 95
#define EXIT_COND (!i915_reset_in_progress(error))
	if (EXIT_COND)
96 97
		return 0;

98 99 100 101
	/* GPU is already declared terminally dead, give up. */
	if (i915_terminally_wedged(error))
		return -EIO;

102 103 104 105 106
	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
107 108 109
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       EXIT_COND,
					       10*HZ);
110 111 112 113
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
114
		return ret;
115
	}
116
#undef EXIT_COND
117

118
	return 0;
119 120
}

121
int i915_mutex_lock_interruptible(struct drm_device *dev)
122
{
123
	struct drm_i915_private *dev_priv = dev->dev_private;
124 125
	int ret;

126
	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
127 128 129 130 131 132 133
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

134
	WARN_ON(i915_verify_lists(dev));
135 136
	return 0;
}
137

138
static inline bool
139
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
140
{
C
Chris Wilson 已提交
141
	return obj->gtt_space && !obj->active;
142 143
}

J
Jesse Barnes 已提交
144 145
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
146
		    struct drm_file *file)
J
Jesse Barnes 已提交
147
{
148
	struct drm_i915_private *dev_priv = dev->dev_private;
J
Jesse Barnes 已提交
149
	struct drm_i915_gem_init *args = data;
150

151 152 153
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

154 155 156
	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;
J
Jesse Barnes 已提交
157

158 159 160 161
	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

J
Jesse Barnes 已提交
162
	mutex_lock(&dev->struct_mutex);
163 164
	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
				  args->gtt_end);
165
	dev_priv->gtt.mappable_end = args->gtt_end;
166 167
	mutex_unlock(&dev->struct_mutex);

168
	return 0;
169 170
}

171 172
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
173
			    struct drm_file *file)
174
{
175
	struct drm_i915_private *dev_priv = dev->dev_private;
176
	struct drm_i915_gem_get_aperture *args = data;
177 178
	struct drm_i915_gem_object *obj;
	size_t pinned;
179

180
	pinned = 0;
181
	mutex_lock(&dev->struct_mutex);
C
Chris Wilson 已提交
182
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
183 184
		if (obj->pin_count)
			pinned += obj->gtt_space->size;
185
	mutex_unlock(&dev->struct_mutex);
186

B
Ben Widawsky 已提交
187
	args->aper_size = dev_priv->gtt.total;
188
	args->aper_available_size = args->aper_size - pinned;
189

190 191 192
	return 0;
}

193 194 195 196 197 198 199 200 201 202 203 204
void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->slab, obj);
}

205 206 207 208 209
static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
210
{
211
	struct drm_i915_gem_object *obj;
212 213
	int ret;
	u32 handle;
214

215
	size = roundup(size, PAGE_SIZE);
216 217
	if (size == 0)
		return -EINVAL;
218 219

	/* Allocate the new object */
220
	obj = i915_gem_alloc_object(dev, size);
221 222 223
	if (obj == NULL)
		return -ENOMEM;

224
	ret = drm_gem_handle_create(file, &obj->base, &handle);
225
	if (ret) {
226 227
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
228
		i915_gem_object_free(obj);
229
		return ret;
230
	}
231

232
	/* drop reference from allocate - handle holds it now */
233
	drm_gem_object_unreference(&obj->base);
234 235
	trace_i915_gem_object_create(obj);

236
	*handle_p = handle;
237 238 239
	return 0;
}

240 241 242 243 244 245
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
246
	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

int i915_gem_dumb_destroy(struct drm_file *file,
			  struct drm_device *dev,
			  uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;
267

268 269 270 271
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

298
static inline int
299 300
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

324 325 326
/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
327
static int
328 329 330 331 332 333 334
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

335
	if (unlikely(page_do_bit17_swizzling))
336 337 338 339 340 341 342 343 344 345 346
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

347
	return ret ? -EFAULT : 0;
348 349
}

350 351 352 353
static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
354
	if (unlikely(swizzled)) {
355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}

}

372 373 374 375 376 377 378 379 380 381 382 383
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
384 385 386
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
387 388 389 390 391 392 393 394 395 396 397

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

398
	return ret ? - EFAULT : 0;
399 400
}

401
static int
402 403 404 405
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
406
{
407
	char __user *user_data;
408
	ssize_t remain;
409
	loff_t offset;
410
	int shmem_page_offset, page_length, ret = 0;
411
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
412
	int prefaulted = 0;
413
	int needs_clflush = 0;
414
	struct sg_page_iter sg_iter;
415

V
Ville Syrjälä 已提交
416
	user_data = to_user_ptr(args->data_ptr);
417 418
	remain = args->size;

419
	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
420

421 422 423 424 425 426 427
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		if (obj->cache_level == I915_CACHE_NONE)
			needs_clflush = 1;
C
Chris Wilson 已提交
428 429 430 431 432
		if (obj->gtt_space) {
			ret = i915_gem_object_set_to_gtt_domain(obj, false);
			if (ret)
				return ret;
		}
433
	}
434

435 436 437 438 439 440
	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

441
	offset = args->offset;
442

443 444
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
445
		struct page *page = sg_page_iter_page(&sg_iter);
446 447 448 449

		if (remain <= 0)
			break;

450 451 452 453 454
		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
455
		shmem_page_offset = offset_in_page(offset);
456 457 458 459
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

460 461 462
		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

463 464 465 466 467
		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;
468 469 470

		mutex_unlock(&dev->struct_mutex);

471
		if (!prefaulted) {
472
			ret = fault_in_multipages_writeable(user_data, remain);
473 474 475 476 477 478 479
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}
480

481 482 483
		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
484

485
		mutex_lock(&dev->struct_mutex);
486

487
next_page:
488 489
		mark_page_accessed(page);

490
		if (ret)
491 492
			goto out;

493
		remain -= page_length;
494
		user_data += page_length;
495 496 497
		offset += page_length;
	}

498
out:
499 500
	i915_gem_object_unpin_pages(obj);

501 502 503
	return ret;
}

504 505 506 507 508 509 510
/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
511
		     struct drm_file *file)
512 513
{
	struct drm_i915_gem_pread *args = data;
514
	struct drm_i915_gem_object *obj;
515
	int ret = 0;
516

517 518 519 520
	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
V
Ville Syrjälä 已提交
521
		       to_user_ptr(args->data_ptr),
522 523 524
		       args->size))
		return -EFAULT;

525
	ret = i915_mutex_lock_interruptible(dev);
526
	if (ret)
527
		return ret;
528

529
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
530
	if (&obj->base == NULL) {
531 532
		ret = -ENOENT;
		goto unlock;
533
	}
534

535
	/* Bounds check source.  */
536 537
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
C
Chris Wilson 已提交
538
		ret = -EINVAL;
539
		goto out;
C
Chris Wilson 已提交
540 541
	}

542 543 544 545 546 547 548 549
	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

C
Chris Wilson 已提交
550 551
	trace_i915_gem_object_pread(obj, args->offset, args->size);

552
	ret = i915_gem_shmem_pread(dev, obj, args, file);
553

554
out:
555
	drm_gem_object_unreference(&obj->base);
556
unlock:
557
	mutex_unlock(&dev->struct_mutex);
558
	return ret;
559 560
}

561 562
/* This is the fast write path which cannot handle
 * page faults in the source data
563
 */
564 565 566 567 568 569

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
570
{
571 572
	void __iomem *vaddr_atomic;
	void *vaddr;
573
	unsigned long unwritten;
574

P
Peter Zijlstra 已提交
575
	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
576 577 578
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
579
						      user_data, length);
P
Peter Zijlstra 已提交
580
	io_mapping_unmap_atomic(vaddr_atomic);
581
	return unwritten;
582 583
}

584 585 586 587
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
588
static int
589 590
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
591
			 struct drm_i915_gem_pwrite *args,
592
			 struct drm_file *file)
593
{
594
	drm_i915_private_t *dev_priv = dev->dev_private;
595
	ssize_t remain;
596
	loff_t offset, page_base;
597
	char __user *user_data;
D
Daniel Vetter 已提交
598 599
	int page_offset, page_length, ret;

600
	ret = i915_gem_object_pin(obj, 0, true, true);
D
Daniel Vetter 已提交
601 602 603 604 605 606 607 608 609 610
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;
611

V
Ville Syrjälä 已提交
612
	user_data = to_user_ptr(args->data_ptr);
613 614
	remain = args->size;

615
	offset = obj->gtt_offset + args->offset;
616 617 618 619

	while (remain > 0) {
		/* Operation in this page
		 *
620 621 622
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
623
		 */
624 625
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
626 627 628 629 630
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
631 632
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
633
		 */
B
Ben Widawsky 已提交
634
		if (fast_user_write(dev_priv->gtt.mappable, page_base,
D
Daniel Vetter 已提交
635 636 637 638
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}
639

640 641 642
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
643 644
	}

D
Daniel Vetter 已提交
645 646 647
out_unpin:
	i915_gem_object_unpin(obj);
out:
648
	return ret;
649 650
}

651 652 653 654
/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
655
static int
656 657 658 659 660
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
661
{
662
	char *vaddr;
663
	int ret;
664

665
	if (unlikely(page_do_bit17_swizzling))
666
		return -EINVAL;
667

668 669 670 671 672 673 674 675 676 677 678
	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
						user_data,
						page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);
679

680
	return ret ? -EFAULT : 0;
681 682
}

683 684
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
685
static int
686 687 688 689 690
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
691
{
692 693
	char *vaddr;
	int ret;
694

695
	vaddr = kmap(page);
696
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
697 698 699
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
700 701
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
702 703
						user_data,
						page_length);
704 705 706 707 708
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
709 710 711
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
712
	kunmap(page);
713

714
	return ret ? -EFAULT : 0;
715 716 717
}

static int
718 719 720 721
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
722 723
{
	ssize_t remain;
724 725
	loff_t offset;
	char __user *user_data;
726
	int shmem_page_offset, page_length, ret = 0;
727
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
728
	int hit_slowpath = 0;
729 730
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
731
	struct sg_page_iter sg_iter;
732

V
Ville Syrjälä 已提交
733
	user_data = to_user_ptr(args->data_ptr);
734 735
	remain = args->size;

736
	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
737

738 739 740 741 742 743 744
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		if (obj->cache_level == I915_CACHE_NONE)
			needs_clflush_after = 1;
C
Chris Wilson 已提交
745 746 747 748 749
		if (obj->gtt_space) {
			ret = i915_gem_object_set_to_gtt_domain(obj, true);
			if (ret)
				return ret;
		}
750 751 752 753 754 755 756
	}
	/* Same trick applies for invalidate partially written cachelines before
	 * writing.  */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
	    && obj->cache_level == I915_CACHE_NONE)
		needs_clflush_before = 1;

757 758 759 760 761 762
	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

763
	offset = args->offset;
764
	obj->dirty = 1;
765

766 767
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
768
		struct page *page = sg_page_iter_page(&sg_iter);
769
		int partial_cacheline_write;
770

771 772 773
		if (remain <= 0)
			break;

774 775 776 777 778
		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
779
		shmem_page_offset = offset_in_page(offset);
780 781 782 783 784

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

785 786 787 788 789 790 791
		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire patch. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

792 793 794
		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

795 796 797 798 799 800
		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;
801 802 803

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
804 805 806 807
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
808

809
		mutex_lock(&dev->struct_mutex);
810

811
next_page:
812 813 814
		set_page_dirty(page);
		mark_page_accessed(page);

815
		if (ret)
816 817
			goto out;

818
		remain -= page_length;
819
		user_data += page_length;
820
		offset += page_length;
821 822
	}

823
out:
824 825
	i915_gem_object_unpin_pages(obj);

826
	if (hit_slowpath) {
827 828 829 830 831 832 833
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
834
			i915_gem_clflush_object(obj);
835
			i915_gem_chipset_flush(dev);
836
		}
837
	}
838

839
	if (needs_clflush_after)
840
		i915_gem_chipset_flush(dev);
841

842
	return ret;
843 844 845 846 847 848 849 850 851
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
852
		      struct drm_file *file)
853 854
{
	struct drm_i915_gem_pwrite *args = data;
855
	struct drm_i915_gem_object *obj;
856 857 858 859 860 861
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
V
Ville Syrjälä 已提交
862
		       to_user_ptr(args->data_ptr),
863 864 865
		       args->size))
		return -EFAULT;

V
Ville Syrjälä 已提交
866
	ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
867
					   args->size);
868 869
	if (ret)
		return -EFAULT;
870

871
	ret = i915_mutex_lock_interruptible(dev);
872
	if (ret)
873
		return ret;
874

875
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
876
	if (&obj->base == NULL) {
877 878
		ret = -ENOENT;
		goto unlock;
879
	}
880

881
	/* Bounds check destination. */
882 883
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
C
Chris Wilson 已提交
884
		ret = -EINVAL;
885
		goto out;
C
Chris Wilson 已提交
886 887
	}

888 889 890 891 892 893 894 895
	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

C
Chris Wilson 已提交
896 897
	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

D
Daniel Vetter 已提交
898
	ret = -EFAULT;
899 900 901 902 903 904
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
905
	if (obj->phys_obj) {
906
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
907 908 909
		goto out;
	}

910
	if (obj->cache_level == I915_CACHE_NONE &&
911
	    obj->tiling_mode == I915_TILING_NONE &&
912
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
913
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
D
Daniel Vetter 已提交
914 915 916
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
917
	}
918

919
	if (ret == -EFAULT || ret == -ENOSPC)
D
Daniel Vetter 已提交
920
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);
921

922
out:
923
	drm_gem_object_unreference(&obj->base);
924
unlock:
925
	mutex_unlock(&dev->struct_mutex);
926 927 928
	return ret;
}

929
int
930
i915_gem_check_wedge(struct i915_gpu_error *error,
931 932
		     bool interruptible)
{
933
	if (i915_reset_in_progress(error)) {
934 935 936 937 938
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

939 940
		/* Recovery complete, but the reset failed ... */
		if (i915_terminally_wedged(error))
941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret;

	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
	if (seqno == ring->outstanding_lazy_request)
		ret = i915_add_request(ring, NULL, NULL);

	return ret;
}

/**
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: duh!
971
 * @reset_counter: reset sequence associated with the given seqno
972 973 974
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
975 976 977 978 979 980 981
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
982 983 984 985
 * Returns 0 if the seqno was found within the alloted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
986
			unsigned reset_counter,
987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015
			bool interruptible, struct timespec *timeout)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct timespec before, now, wait_time={1,0};
	unsigned long timeout_jiffies;
	long end;
	bool wait_forever = true;
	int ret;

	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

	trace_i915_gem_request_wait_begin(ring, seqno);

	if (timeout != NULL) {
		wait_time = *timeout;
		wait_forever = false;
	}

	timeout_jiffies = timespec_to_jiffies(&wait_time);

	if (WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged * */
	getrawmonotonic(&before);

#define EXIT_COND \
	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
1016 1017
	 i915_reset_in_progress(&dev_priv->gpu_error) || \
	 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1018 1019 1020 1021 1022 1023 1024 1025 1026
	do {
		if (interruptible)
			end = wait_event_interruptible_timeout(ring->irq_queue,
							       EXIT_COND,
							       timeout_jiffies);
		else
			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
						 timeout_jiffies);

1027 1028 1029 1030 1031 1032 1033
		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
			end = -EAGAIN;

		/* ... but upgrade the -EGAIN to an -EIO if the gpu is truely
		 * gone. */
1034
		ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047
		if (ret)
			end = ret;
	} while (end == 0 && wait_forever);

	getrawmonotonic(&now);

	ring->irq_put(ring);
	trace_i915_gem_request_wait_end(ring, seqno);
#undef EXIT_COND

	if (timeout) {
		struct timespec sleep_time = timespec_sub(now, before);
		*timeout = timespec_sub(*timeout, sleep_time);
1048 1049
		if (!timespec_valid(timeout)) /* i.e. negative time remains */
			set_normalized_timespec(timeout, 0, 0);
1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079
	}

	switch (end) {
	case -EIO:
	case -EAGAIN: /* Wedged */
	case -ERESTARTSYS: /* Signal */
		return (int)end;
	case 0: /* Timeout */
		return -ETIME;
	default: /* Completed */
		WARN_ON(end < 0); /* We're not aware of other errors */
		return 0;
	}
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible = dev_priv->mm.interruptible;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(seqno == 0);

1080
	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1081 1082 1083 1084 1085 1086 1087
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

1088 1089 1090
	return __wait_seqno(ring, seqno,
			    atomic_read(&dev_priv->gpu_error.reset_counter),
			    interruptible, NULL);
1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct intel_ring_buffer *ring = obj->ring;
	u32 seqno;
	int ret;

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	i915_gem_retire_requests_ring(ring);

	/* Manually manage the write flush as we may have not yet
	 * retired the buffer.
	 */
	if (obj->last_write_seqno &&
	    i915_seqno_passed(seqno, obj->last_write_seqno)) {
		obj->last_write_seqno = 0;
		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
	}

	return 0;
}

1127 1128 1129 1130 1131 1132 1133 1134 1135 1136
/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
					    bool readonly)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = obj->ring;
1137
	unsigned reset_counter;
1138 1139 1140 1141 1142 1143 1144 1145 1146 1147
	u32 seqno;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!dev_priv->mm.interruptible);

	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
	if (seqno == 0)
		return 0;

1148
	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
1149 1150 1151 1152 1153 1154 1155
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

1156
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1157
	mutex_unlock(&dev->struct_mutex);
1158
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174
	mutex_lock(&dev->struct_mutex);

	i915_gem_retire_requests_ring(ring);

	/* Manually manage the write flush as we may have not yet
	 * retired the buffer.
	 */
	if (obj->last_write_seqno &&
	    i915_seqno_passed(seqno, obj->last_write_seqno)) {
		obj->last_write_seqno = 0;
		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
	}

	return ret;
}

1175
/**
1176 1177
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
1178 1179 1180
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1181
			  struct drm_file *file)
1182 1183
{
	struct drm_i915_gem_set_domain *args = data;
1184
	struct drm_i915_gem_object *obj;
1185 1186
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
1187 1188
	int ret;

1189
	/* Only handle setting domains to types used by the CPU. */
1190
	if (write_domain & I915_GEM_GPU_DOMAINS)
1191 1192
		return -EINVAL;

1193
	if (read_domains & I915_GEM_GPU_DOMAINS)
1194 1195 1196 1197 1198 1199 1200 1201
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

1202
	ret = i915_mutex_lock_interruptible(dev);
1203
	if (ret)
1204
		return ret;
1205

1206
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1207
	if (&obj->base == NULL) {
1208 1209
		ret = -ENOENT;
		goto unlock;
1210
	}
1211

1212 1213 1214 1215 1216 1217 1218 1219
	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
	if (ret)
		goto unref;

1220 1221
	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1222 1223 1224 1225 1226 1227 1228

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
1229
	} else {
1230
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1231 1232
	}

1233
unref:
1234
	drm_gem_object_unreference(&obj->base);
1235
unlock:
1236 1237 1238 1239 1240 1241 1242 1243 1244
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1245
			 struct drm_file *file)
1246 1247
{
	struct drm_i915_gem_sw_finish *args = data;
1248
	struct drm_i915_gem_object *obj;
1249 1250
	int ret = 0;

1251
	ret = i915_mutex_lock_interruptible(dev);
1252
	if (ret)
1253
		return ret;
1254

1255
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1256
	if (&obj->base == NULL) {
1257 1258
		ret = -ENOENT;
		goto unlock;
1259 1260 1261
	}

	/* Pinned buffers may be scanout, so flush the cache */
1262
	if (obj->pin_count)
1263 1264
		i915_gem_object_flush_cpu_write_domain(obj);

1265
	drm_gem_object_unreference(&obj->base);
1266
unlock:
1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1280
		    struct drm_file *file)
1281 1282 1283 1284 1285
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

1286
	obj = drm_gem_object_lookup(dev, file, args->handle);
1287
	if (obj == NULL)
1288
		return -ENOENT;
1289

1290 1291 1292 1293 1294 1295 1296 1297
	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->filp) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

1298
	addr = vm_mmap(obj->filp, 0, args->size,
1299 1300
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
1301
	drm_gem_object_unreference_unlocked(obj);
1302 1303 1304 1305 1306 1307 1308 1309
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327
/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
1328 1329
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
1330
	drm_i915_private_t *dev_priv = dev->dev_private;
1331 1332 1333
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
1334
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1335 1336 1337 1338 1339

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

1340 1341 1342
	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;
1343

C
Chris Wilson 已提交
1344 1345
	trace_i915_gem_object_fault(obj, page_offset, true, write);

1346 1347 1348 1349 1350 1351
	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
		ret = -EINVAL;
		goto unlock;
	}

1352
	/* Now bind it into the GTT if needed */
1353 1354 1355
	ret = i915_gem_object_pin(obj, 0, true, false);
	if (ret)
		goto unlock;
1356

1357 1358 1359
	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto unpin;
1360

1361
	ret = i915_gem_object_get_fence(obj);
1362
	if (ret)
1363
		goto unpin;
1364

1365 1366
	obj->fault_mappable = true;

B
Ben Widawsky 已提交
1367
	pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
1368 1369 1370 1371
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1372 1373
unpin:
	i915_gem_object_unpin(obj);
1374
unlock:
1375
	mutex_unlock(&dev->struct_mutex);
1376
out:
1377
	switch (ret) {
1378
	case -EIO:
1379 1380 1381
		/* If this -EIO is due to a gpu hang, give the reset code a
		 * chance to clean up the mess. Otherwise return the proper
		 * SIGBUS. */
1382
		if (i915_terminally_wedged(&dev_priv->gpu_error))
1383
			return VM_FAULT_SIGBUS;
1384
	case -EAGAIN:
1385 1386 1387 1388 1389 1390 1391
		/* Give the error handler a chance to run and move the
		 * objects off the GPU active list. Next time we service the
		 * fault, we should be able to transition the page into the
		 * GTT without touching the GPU (and so avoid further
		 * EIO/EGAIN). If the GPU is wedged, then there is no issue
		 * with coherency, just lost writes.
		 */
1392
		set_need_resched();
1393 1394
	case 0:
	case -ERESTARTSYS:
1395
	case -EINTR:
1396 1397 1398 1399 1400
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
1401
		return VM_FAULT_NOPAGE;
1402 1403
	case -ENOMEM:
		return VM_FAULT_OOM;
1404 1405
	case -ENOSPC:
		return VM_FAULT_SIGBUS;
1406
	default:
1407
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1408
		return VM_FAULT_SIGBUS;
1409 1410 1411
	}
}

1412 1413 1414 1415
/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
1416
 * Preserve the reservation of the mmapping with the DRM core code, but
1417 1418 1419 1420 1421 1422 1423 1424 1425
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, than pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
1426
void
1427
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1428
{
1429 1430
	if (!obj->fault_mappable)
		return;
1431

1432 1433 1434 1435
	if (obj->base.dev->dev_mapping)
		unmap_mapping_range(obj->base.dev->dev_mapping,
				    (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
				    obj->base.size, 1);
1436

1437
	obj->fault_mappable = false;
1438 1439
}

1440
uint32_t
1441
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1442
{
1443
	uint32_t gtt_size;
1444 1445

	if (INTEL_INFO(dev)->gen >= 4 ||
1446 1447
	    tiling_mode == I915_TILING_NONE)
		return size;
1448 1449 1450

	/* Previous chips need a power-of-two fence region when tiling */
	if (INTEL_INFO(dev)->gen == 3)
1451
		gtt_size = 1024*1024;
1452
	else
1453
		gtt_size = 512*1024;
1454

1455 1456
	while (gtt_size < size)
		gtt_size <<= 1;
1457

1458
	return gtt_size;
1459 1460
}

1461 1462 1463 1464 1465
/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
1466
 * potential fence register mapping.
1467
 */
1468 1469 1470
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
			   int tiling_mode, bool fenced)
1471 1472 1473 1474 1475
{
	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
1476
	if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1477
	    tiling_mode == I915_TILING_NONE)
1478 1479
		return 4096;

1480 1481 1482 1483
	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
1484
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
1485 1486
}

1487 1488 1489 1490 1491 1492 1493 1494
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int ret;

	if (obj->base.map_list.map)
		return 0;

1495 1496
	dev_priv->mm.shrinker_no_lock_stealing = true;

1497 1498
	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
1499
		goto out;
1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510

	/* Badly fragmented mmap space? The only way we can recover
	 * space is by destroying unwanted objects. We can't randomly release
	 * mmap_offsets as userspace expects them to be persistent for the
	 * lifetime of the objects. The closest we can is to release the
	 * offsets on purgeable objects by truncating it and marking it purged,
	 * which prevents userspace from ever using that object again.
	 */
	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
	ret = drm_gem_create_mmap_offset(&obj->base);
	if (ret != -ENOSPC)
1511
		goto out;
1512 1513

	i915_gem_shrink_all(dev_priv);
1514 1515 1516 1517 1518
	ret = drm_gem_create_mmap_offset(&obj->base);
out:
	dev_priv->mm.shrinker_no_lock_stealing = false;

	return ret;
1519 1520 1521 1522 1523 1524 1525 1526 1527 1528
}

static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	if (!obj->base.map_list.map)
		return;

	drm_gem_free_mmap_offset(&obj->base);
}

1529
int
1530 1531 1532 1533
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
1534
{
1535
	struct drm_i915_private *dev_priv = dev->dev_private;
1536
	struct drm_i915_gem_object *obj;
1537 1538
	int ret;

1539
	ret = i915_mutex_lock_interruptible(dev);
1540
	if (ret)
1541
		return ret;
1542

1543
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1544
	if (&obj->base == NULL) {
1545 1546 1547
		ret = -ENOENT;
		goto unlock;
	}
1548

B
Ben Widawsky 已提交
1549
	if (obj->base.size > dev_priv->gtt.mappable_end) {
1550
		ret = -E2BIG;
1551
		goto out;
1552 1553
	}

1554
	if (obj->madv != I915_MADV_WILLNEED) {
1555
		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1556 1557
		ret = -EINVAL;
		goto out;
1558 1559
	}

1560 1561 1562
	ret = i915_gem_object_create_mmap_offset(obj);
	if (ret)
		goto out;
1563

1564
	*offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
1565

1566
out:
1567
	drm_gem_object_unreference(&obj->base);
1568
unlock:
1569
	mutex_unlock(&dev->struct_mutex);
1570
	return ret;
1571 1572
}

1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596
/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}

D
Daniel Vetter 已提交
1597 1598 1599
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1600 1601 1602
{
	struct inode *inode;

1603
	i915_gem_object_free_mmap_offset(obj);
1604

1605 1606
	if (obj->base.filp == NULL)
		return;
1607

D
Daniel Vetter 已提交
1608 1609 1610 1611 1612
	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
A
Al Viro 已提交
1613
	inode = file_inode(obj->base.filp);
D
Daniel Vetter 已提交
1614
	shmem_truncate_range(inode, 0, (loff_t)-1);
1615

D
Daniel Vetter 已提交
1616 1617
	obj->madv = __I915_MADV_PURGED;
}
1618

D
Daniel Vetter 已提交
1619 1620 1621 1622
static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{
	return obj->madv == I915_MADV_DONTNEED;
1623 1624
}

1625
static void
1626
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1627
{
1628 1629
	struct sg_page_iter sg_iter;
	int ret;
1630

1631
	BUG_ON(obj->madv == __I915_MADV_PURGED);
1632

C
Chris Wilson 已提交
1633 1634 1635 1636 1637 1638 1639 1640 1641 1642
	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		WARN_ON(ret != -EIO);
		i915_gem_clflush_object(obj);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

1643
	if (i915_gem_object_needs_bit17_swizzle(obj))
1644 1645
		i915_gem_object_save_bit_17_swizzle(obj);

1646 1647
	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;
1648

1649
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
1650
		struct page *page = sg_page_iter_page(&sg_iter);
1651

1652
		if (obj->dirty)
1653
			set_page_dirty(page);
1654

1655
		if (obj->madv == I915_MADV_WILLNEED)
1656
			mark_page_accessed(page);
1657

1658
		page_cache_release(page);
1659
	}
1660
	obj->dirty = 0;
1661

1662 1663
	sg_free_table(obj->pages);
	kfree(obj->pages);
1664
}
C
Chris Wilson 已提交
1665

1666
int
1667 1668 1669 1670
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	const struct drm_i915_gem_object_ops *ops = obj->ops;

1671
	if (obj->pages == NULL)
1672 1673 1674
		return 0;

	BUG_ON(obj->gtt_space);
C
Chris Wilson 已提交
1675

1676 1677 1678
	if (obj->pages_pin_count)
		return -EBUSY;

1679 1680 1681 1682 1683
	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early. */
	list_del(&obj->gtt_list);

1684
	ops->put_pages(obj);
1685
	obj->pages = NULL;
1686

C
Chris Wilson 已提交
1687 1688 1689 1690 1691 1692 1693
	if (i915_gem_object_is_purgeable(obj))
		i915_gem_object_truncate(obj);

	return 0;
}

static long
1694 1695
__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
		  bool purgeable_only)
C
Chris Wilson 已提交
1696 1697 1698 1699 1700 1701 1702
{
	struct drm_i915_gem_object *obj, *next;
	long count = 0;

	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.unbound_list,
				 gtt_list) {
1703
		if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1704
		    i915_gem_object_put_pages(obj) == 0) {
C
Chris Wilson 已提交
1705 1706 1707 1708 1709 1710 1711 1712 1713
			count += obj->base.size >> PAGE_SHIFT;
			if (count >= target)
				return count;
		}
	}

	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.inactive_list,
				 mm_list) {
1714
		if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
C
Chris Wilson 已提交
1715
		    i915_gem_object_unbind(obj) == 0 &&
1716
		    i915_gem_object_put_pages(obj) == 0) {
C
Chris Wilson 已提交
1717 1718 1719 1720 1721 1722 1723 1724 1725
			count += obj->base.size >> PAGE_SHIFT;
			if (count >= target)
				return count;
		}
	}

	return count;
}

1726 1727 1728 1729 1730 1731
static long
i915_gem_purge(struct drm_i915_private *dev_priv, long target)
{
	return __i915_gem_shrink(dev_priv, target, true);
}

C
Chris Wilson 已提交
1732 1733 1734 1735 1736 1737 1738 1739
static void
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj, *next;

	i915_gem_evict_everything(dev_priv->dev);

	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
1740
		i915_gem_object_put_pages(obj);
D
Daniel Vetter 已提交
1741 1742
}

static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	int page_count, i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sg_page_iter sg_iter;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	gfp_t gfp;

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	page_count = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		sg_free_table(st);
		kfree(st);
		return -ENOMEM;
	}

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping = file_inode(obj->base.filp)->i_mapping;
	gfp = mapping_gfp_mask(mapping);
	gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
	gfp &= ~(__GFP_IO | __GFP_WAIT);
	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		if (IS_ERR(page)) {
			i915_gem_purge(dev_priv, page_count);
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		}
		if (IS_ERR(page)) {
			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 */
			gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
			gfp |= __GFP_IO | __GFP_WAIT;

			i915_gem_shrink_all(dev_priv);
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (IS_ERR(page))
				goto err_pages;

			gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
			gfp &= ~(__GFP_IO | __GFP_WAIT);
		}

		if (!i || page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);
	}

	sg_mark_end(sg);
	obj->pages = st;

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj);

	return 0;

err_pages:
	sg_mark_end(sg);
	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
		page_cache_release(sg_page_iter_page(&sg_iter));
	sg_free_table(st);
	kfree(st);
	return PTR_ERR(page);
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_get_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_put_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int
i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	const struct drm_i915_gem_object_ops *ops = obj->ops;
	int ret;

	if (obj->pages)
		return 0;

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to obtain a purgeable object\n");
		return -EINVAL;
	}

	BUG_ON(obj->pages_pin_count);

	ret = ops->get_pages(obj);
	if (ret)
		return ret;

	list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
	return 0;
}

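/* Mark the object as busy on the GPU: take a reference on first activation,
 * move it to the tail of the active lists and record the seqno of the
 * request that last used it.
 */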
void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
			       struct intel_ring_buffer *ring)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 seqno = intel_ring_get_seqno(ring);

	BUG_ON(ring == NULL);
	obj->ring = ring;

	/* Add a reference if we're newly entering the active list. */
	if (!obj->active) {
		drm_gem_object_reference(&obj->base);
		obj->active = 1;
	}

	/* Move from whatever list we were on to the tail of execution. */
	list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
	list_move_tail(&obj->ring_list, &ring->active_list);

	obj->last_read_seqno = seqno;

	if (obj->fenced_gpu_access) {
		obj->last_fenced_seqno = seqno;

		/* Bump MRU to take account of the delayed flush */
		if (obj->fence_reg != I915_FENCE_REG_NONE) {
			struct drm_i915_fence_reg *reg;

			reg = &dev_priv->fence_regs[obj->fence_reg];
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
		}
	}
}

static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
	BUG_ON(!obj->active);

	list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	list_del_init(&obj->ring_list);
	obj->ring = NULL;

	obj->last_read_seqno = 0;
	obj->last_write_seqno = 0;
	obj->base.write_domain = 0;

	obj->last_fenced_seqno = 0;
	obj->fenced_gpu_access = false;

	obj->active = 0;
	drm_gem_object_unreference(&obj->base);

	WARN_ON(i915_verify_lists(dev));
}

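/* Quiesce all rings and retire their outstanding requests so that the seqno
 * counters can be reinitialised without any work still in flight.
 */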
static int
i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i, j;

	/* Carefully retire all requests without writing to the rings */
	for_each_ring(ring, dev_priv, i) {
		ret = intel_ring_idle(ring);
		if (ret)
			return ret;
	}
	i915_gem_retire_requests(dev);

	/* Finally reset hw state */
	for_each_ring(ring, dev_priv, i) {
		intel_ring_init_seqno(ring, seqno);

		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
			ring->sync_seqno[j] = 0;
	}

	return 0;
}

int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (seqno == 0)
		return -EINVAL;

	/* HWS page needs to be set less than what we
	 * will inject to ring
	 */
	ret = i915_gem_init_seqno(dev, seqno - 1);
	if (ret)
		return ret;

	/* Carefully set the last_seqno value so that wrap
	 * detection still works
	 */
	dev_priv->next_seqno = seqno;
	dev_priv->last_seqno = seqno - 1;
	if (dev_priv->last_seqno == 0)
		dev_priv->last_seqno--;

	return 0;
}

int
i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* reserve 0 for non-seqno */
	if (dev_priv->next_seqno == 0) {
		int ret = i915_gem_init_seqno(dev, 0);
		if (ret)
			return ret;

		dev_priv->next_seqno = 1;
	}

	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
	return 0;
}

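/* Emit a new request on the ring: flush any outstanding GPU caches, write
 * the request's seqno to the ring and queue the bookkeeping needed to retire
 * it later (kicking the hangcheck timer and the retire worker).
 */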
int
i915_add_request(struct intel_ring_buffer *ring,
		 struct drm_file *file,
		 u32 *out_seqno)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_request *request;
	u32 request_ring_position;
	int was_empty;
	int ret;

	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	ret = intel_ring_flush_all_caches(ring);
	if (ret)
		return ret;

	request = kmalloc(sizeof(*request), GFP_KERNEL);
	if (request == NULL)
		return -ENOMEM;

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	request_ring_position = intel_ring_get_tail(ring);

	ret = ring->add_request(ring);
	if (ret) {
		kfree(request);
		return ret;
	}

	request->seqno = intel_ring_get_seqno(ring);
	request->ring = ring;
	request->tail = request_ring_position;
	request->ctx = ring->last_context;

	if (request->ctx)
		i915_gem_context_reference(request->ctx);

	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&ring->request_list);
	list_add_tail(&request->list, &ring->request_list);
	request->file_priv = NULL;

	if (file) {
		struct drm_i915_file_private *file_priv = file->driver_priv;

		spin_lock(&file_priv->mm.lock);
		request->file_priv = file_priv;
		list_add_tail(&request->client_list,
			      &file_priv->mm.request_list);
		spin_unlock(&file_priv->mm.lock);
	}

	trace_i915_gem_request_add(ring, request->seqno);
	ring->outstanding_lazy_request = 0;

	if (!dev_priv->mm.suspended) {
		if (i915_enable_hangcheck) {
			mod_timer(&dev_priv->gpu_error.hangcheck_timer,
				  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
		}
		if (was_empty) {
			queue_delayed_work(dev_priv->wq,
					   &dev_priv->mm.retire_work,
					   round_jiffies_up_relative(HZ));
			intel_mark_busy(dev_priv->dev);
		}
	}

	if (out_seqno)
		*out_seqno = request->seqno;
	return 0;
}

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	if (request->file_priv) {
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

static void i915_gem_free_request(struct drm_i915_gem_request *request)
{
	list_del(&request->list);
	i915_gem_request_remove_from_client(request);

	if (request->ctx)
		i915_gem_context_unreference(request->ctx);

	kfree(request);
}

static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
				      struct intel_ring_buffer *ring)
{
	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		i915_gem_free_request(request);
	}

	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&ring->active_list,
				       struct drm_i915_gem_object,
				       ring_list);

		i915_gem_object_move_to_inactive(obj);
	}
}

static void i915_gem_reset_fences(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];

		if (reg->obj)
			i915_gem_object_fence_lost(reg->obj);

		i915_gem_write_fence(dev, i, NULL);

		reg->pin_count = 0;
		reg->obj = NULL;
		INIT_LIST_HEAD(&reg->lru_list);
	}

	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
}

void i915_gem_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		i915_gem_reset_ring_lists(dev_priv, ring);

	/* Move everything out of the GPU domains to ensure we do any
	 * necessary invalidation upon reuse.
	 */
	list_for_each_entry(obj,
			    &dev_priv->mm.inactive_list,
			    mm_list)
	{
		obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
	}

	/* The fence registers are invalidated so clear them out */
	i915_gem_reset_fences(dev);
}

/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
	uint32_t seqno;

	if (list_empty(&ring->request_list))
		return;

	WARN_ON(i915_verify_lists(ring->dev));

	seqno = ring->get_seqno(ring, true);

	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		if (!i915_seqno_passed(seqno, request->seqno))
			break;

		trace_i915_gem_request_retire(ring, request->seqno);
		/* We know the GPU must have read the request to have
		 * sent us the seqno + interrupt, so use the position
		 * of tail of the request to update the last known position
		 * of the GPU head.
		 */
		ring->last_retired_head = request->tail;

		i915_gem_free_request(request);
	}

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&ring->active_list,
				      struct drm_i915_gem_object,
				      ring_list);

		if (!i915_seqno_passed(seqno, obj->last_read_seqno))
			break;

		i915_gem_object_move_to_inactive(obj);
	}

	if (unlikely(ring->trace_irq_seqno &&
		     i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
		ring->irq_put(ring);
		ring->trace_irq_seqno = 0;
	}

	WARN_ON(i915_verify_lists(ring->dev));
}

void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		i915_gem_retire_requests_ring(ring);
}

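/* Delayed-work handler that periodically retires completed requests and
 * re-arms itself (roughly once a second) while the GPU remains busy.
 */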
static void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;
	struct intel_ring_buffer *ring;
	bool idle;
	int i;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	/* Come back later if the device is busy... */
	if (!mutex_trylock(&dev->struct_mutex)) {
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
				   round_jiffies_up_relative(HZ));
		return;
	}

	i915_gem_retire_requests(dev);

	/* Send a periodic flush down the ring so we don't hold onto GEM
	 * objects indefinitely.
	 */
	idle = true;
	for_each_ring(ring, dev_priv, i) {
		if (ring->gpu_caches_dirty)
			i915_add_request(ring, NULL, NULL);

		idle &= list_empty(&ring->request_list);
	}

	if (!dev_priv->mm.suspended && !idle)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
				   round_jiffies_up_relative(HZ));
	if (idle)
		intel_mark_idle(dev);

	mutex_unlock(&dev->struct_mutex);
}

/**
 * Ensures that an object will eventually get non-busy by flushing any required
 * write domains, emitting any outstanding lazy request and retiring any
 * completed requests.
 */
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{
	int ret;

	if (obj->active) {
		ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
		if (ret)
			return ret;

		i915_gem_retire_requests_ring(obj->ring);
	}

	return 0;
}

/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
 * @DRM_IOCTL_ARGS: standard ioctl arguments
 *
 * Returns 0 if successful, else an error is returned with the remaining time in
 * the timeout parameter.
 *  -ETIME: object is still busy after timeout
 *  -ERESTARTSYS: signal interrupted the wait
 *  -ENOENT: object doesn't exist
 * Also possible, but rare:
 *  -EAGAIN: GPU wedged
 *  -ENOMEM: damn
 *  -ENODEV: Internal IRQ fail
 *  -E?: The add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 * without holding struct_mutex the object may become re-busied before this
 * function completes. A similar but shorter race condition exists in the busy
 * ioctl
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
	struct intel_ring_buffer *ring = NULL;
	struct timespec timeout_stack, *timeout = NULL;
	unsigned reset_counter;
	u32 seqno = 0;
	int ret = 0;

	if (args->timeout_ns >= 0) {
		timeout_stack = ns_to_timespec(args->timeout_ns);
		timeout = &timeout_stack;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
	if (&obj->base == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOENT;
	}

	/* Need to make sure the object gets inactive eventually. */
	ret = i915_gem_object_flush_active(obj);
	if (ret)
		goto out;

	if (obj->active) {
		seqno = obj->last_read_seqno;
		ring = obj->ring;
	}

	if (seqno == 0)
		goto out;

	/* Do this after OLR check to make sure we make forward progress polling
	 * on this IOCTL with a 0 timeout (like busy ioctl)
	 */
	if (!args->timeout_ns) {
		ret = -ETIME;
		goto out;
	}

	drm_gem_object_unreference(&obj->base);
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);

	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
	if (timeout)
		args->timeout_ns = timespec_to_ns(timeout);
	return ret;

out:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * i915_gem_object_sync - sync an object to a ring.
 *
 * @obj: object which may be in use on another ring.
 * @to: ring we wish to use the object on. May be NULL.
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Calling with NULL implies synchronizing the object with the CPU
 * rather than a particular GPU ring.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
		     struct intel_ring_buffer *to)
{
	struct intel_ring_buffer *from = obj->ring;
	u32 seqno;
	int ret, idx;

	if (from == NULL || to == from)
		return 0;

	if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
		return i915_gem_object_wait_rendering(obj, false);

	idx = intel_ring_sync_index(from, to);

	seqno = obj->last_read_seqno;
	if (seqno <= from->sync_seqno[idx])
		return 0;

	ret = i915_gem_check_olr(obj->ring, seqno);
	if (ret)
		return ret;

	ret = to->sync_to(to, from, seqno);
	if (!ret)
		/* We use last_read_seqno because sync_to()
		 * might have just caused seqno wrap under
		 * the radar.
		 */
		from->sync_seqno[idx] = obj->last_read_seqno;

	return ret;
}

static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
{
	u32 old_write_domain, old_read_domains;

	/* Force a pagefault for domain tracking on next user access */
	i915_gem_release_mmap(obj);

	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		return;

	/* Wait for any direct GTT access to complete */
	mb();

	old_read_domains = obj->base.read_domains;
	old_write_domain = obj->base.write_domain;

	obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
	obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);
}

/**
 * Unbinds an object from the GTT aperture.
 */
int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
	int ret;

	if (obj->gtt_space == NULL)
		return 0;

	if (obj->pin_count)
		return -EBUSY;

	BUG_ON(obj->pages == NULL);

	ret = i915_gem_object_finish_gpu(obj);
	if (ret)
		return ret;
	/* Continue on if we fail due to EIO, the GPU is hung so we
	 * should be safe and we need to cleanup or else we might
	 * cause memory corruption through use-after-free.
	 */

	i915_gem_object_finish_gtt(obj);

	/* release the fence reg _after_ flushing */
	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	trace_i915_gem_object_unbind(obj);

	if (obj->has_global_gtt_mapping)
		i915_gem_gtt_unbind_object(obj);
	if (obj->has_aliasing_ppgtt_mapping) {
		i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
		obj->has_aliasing_ppgtt_mapping = 0;
	}
	i915_gem_gtt_finish_object(obj);

	list_del(&obj->mm_list);
	list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
	/* Avoid an unnecessary call to unbind on rebind. */
	obj->map_and_fenceable = true;

	drm_mm_put_block(obj->gtt_space);
	obj->gtt_space = NULL;
	obj->gtt_offset = 0;

	return 0;
}

int i915_gpu_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	/* Flush everything onto the inactive list. */
	for_each_ring(ring, dev_priv, i) {
		ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
		if (ret)
			return ret;

		ret = intel_ring_idle(ring);
		if (ret)
			return ret;
	}

	return 0;
}

static void i965_write_fence_reg(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int fence_reg;
	int fence_pitch_shift;
	uint64_t val;

	if (INTEL_INFO(dev)->gen >= 6) {
		fence_reg = FENCE_REG_SANDYBRIDGE_0;
		fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
	} else {
		fence_reg = FENCE_REG_965_0;
		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
	}

	if (obj) {
		u32 size = obj->gtt_space->size;

		val = (uint64_t)((obj->gtt_offset + size - 4096) &
				 0xfffff000) << 32;
		val |= obj->gtt_offset & 0xfffff000;
		val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
		if (obj->tiling_mode == I915_TILING_Y)
			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
		val |= I965_FENCE_REG_VALID;
	} else
		val = 0;

	fence_reg += reg * 8;
	I915_WRITE64(fence_reg, val);
	POSTING_READ(fence_reg);
}

static void i915_write_fence_reg(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 val;

	if (obj) {
		u32 size = obj->gtt_space->size;
		int pitch_val;
		int tile_width;

		WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
		     (size & -size) != size ||
		     (obj->gtt_offset & (size - 1)),
		     "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
		     obj->gtt_offset, obj->map_and_fenceable, size);

		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
			tile_width = 128;
		else
			tile_width = 512;

		/* Note: pitch better be a power of two tile widths */
		pitch_val = obj->stride / tile_width;
		pitch_val = ffs(pitch_val) - 1;

		val = obj->gtt_offset;
		if (obj->tiling_mode == I915_TILING_Y)
			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
		val |= I915_FENCE_SIZE_BITS(size);
		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
		val |= I830_FENCE_REG_VALID;
	} else
		val = 0;

	if (reg < 8)
		reg = FENCE_REG_830_0 + reg * 4;
	else
		reg = FENCE_REG_945_8 + (reg - 8) * 4;

	I915_WRITE(reg, val);
	POSTING_READ(reg);
}

static void i830_write_fence_reg(struct drm_device *dev, int reg,
				struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t val;

	if (obj) {
		u32 size = obj->gtt_space->size;
		uint32_t pitch_val;

		WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
		     (size & -size) != size ||
		     (obj->gtt_offset & (size - 1)),
		     "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
		     obj->gtt_offset, size);

		pitch_val = obj->stride / 128;
		pitch_val = ffs(pitch_val) - 1;

		val = obj->gtt_offset;
		if (obj->tiling_mode == I915_TILING_Y)
			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
		val |= I830_FENCE_SIZE_BITS(size);
		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
		val |= I830_FENCE_REG_VALID;
	} else
		val = 0;

	I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
	POSTING_READ(FENCE_REG_830_0 + reg * 4);
}

inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
{
	return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
}

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Ensure that all CPU reads are completed before installing a fence
	 * and all writes before removing the fence.
	 */
	if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
		mb();

	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
	case 5:
	case 4: i965_write_fence_reg(dev, reg, obj); break;
	case 3: i915_write_fence_reg(dev, reg, obj); break;
	case 2: i830_write_fence_reg(dev, reg, obj); break;
	default: BUG();
	}

	/* And similarly be paranoid that no direct access to this region
	 * is reordered to before the fence is installed.
	 */
	if (i915_gem_object_needs_mb(obj))
		mb();
}

static inline int fence_number(struct drm_i915_private *dev_priv,
			       struct drm_i915_fence_reg *fence)
{
	return fence - dev_priv->fence_regs;
}

struct write_fence {
	struct drm_device *dev;
	struct drm_i915_gem_object *obj;
	int fence;
};

static void i915_gem_write_fence__ipi(void *data)
{
	struct write_fence *args = data;

	/* Required for SNB+ with LLC */
	wbinvd();

	/* Required for VLV */
	i915_gem_write_fence(args->dev, args->fence, args->obj);
}

static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct write_fence args = {
		.dev = obj->base.dev,
		.fence = fence_number(dev_priv, fence),
		.obj = enable ? obj : NULL,
	};

	/* In order to fully serialize access to the fenced region and
	 * the update to the fence register we need to take extreme
	 * measures on SNB+. In theory, the write to the fence register
	 * flushes all memory transactions before, and coupled with the
	 * mb() placed around the register write we serialise all memory
	 * operations with respect to the changes in the tiler. Yet, on
	 * SNB+ we need to take a step further and emit an explicit wbinvd()
	 * on each processor in order to manually flush all memory
	 * transactions before updating the fence register.
	 *
	 * However, Valleyview complicates matters. There the wbinvd is
	 * insufficient and unlike SNB/IVB requires the serialising
	 * register write. (Note that that register write by itself is
	 * conversely not sufficient for SNB+.) To compromise, we do both.
	 */
	if (INTEL_INFO(args.dev)->gen >= 6)
		on_each_cpu(i915_gem_write_fence__ipi, &args, 1);
	else
		i915_gem_write_fence(args.dev, args.fence, args.obj);

	if (enable) {
		obj->fence_reg = args.fence;
		fence->obj = obj;
		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
	} else {
		obj->fence_reg = I915_FENCE_REG_NONE;
		fence->obj = NULL;
		list_del_init(&fence->lru_list);
	}
}

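/* Wait for the last fenced GPU access to the object to complete before the
 * fence register is modified or released.
 */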
static int
i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
{
	if (obj->last_fenced_seqno) {
		int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
		if (ret)
			return ret;

		obj->last_fenced_seqno = 0;
	}

	obj->fenced_gpu_access = false;
	return 0;
}

int
i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct drm_i915_fence_reg *fence;
	int ret;

	ret = i915_gem_object_wait_fence(obj);
	if (ret)
		return ret;

	if (obj->fence_reg == I915_FENCE_REG_NONE)
		return 0;

	fence = &dev_priv->fence_regs[obj->fence_reg];

	i915_gem_object_fence_lost(obj);
	i915_gem_object_update_fence(obj, fence, false);

	return 0;
}

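/* Find a fence register to (re)use: prefer an unused register, otherwise
 * return the least-recently-used register that is not pinned, or NULL if
 * every fence is currently pinned.
 */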
static struct drm_i915_fence_reg *
i915_find_fence_reg(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_fence_reg *reg, *avail;
	int i;

	/* First try to find a free reg */
	avail = NULL;
	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
		reg = &dev_priv->fence_regs[i];
		if (!reg->obj)
			return reg;

		if (!reg->pin_count)
			avail = reg;
	}

	if (avail == NULL)
		return NULL;

	/* None available, try to steal one or wait for a user to finish */
	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
		if (reg->pin_count)
			continue;

		return reg;
	}

	return NULL;
}

/**
 * i915_gem_object_get_fence - set up fencing for an object
 * @obj: object to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 *
 * For an untiled surface, this removes any existing fence.
 */
int
i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool enable = obj->tiling_mode != I915_TILING_NONE;
	struct drm_i915_fence_reg *reg;
	int ret;

	/* Have we updated the tiling parameters upon the object and so
	 * will need to serialise the write to the associated fence register?
	 */
	if (obj->fence_dirty) {
		ret = i915_gem_object_wait_fence(obj);
		if (ret)
			return ret;
	}

	/* Just update our place in the LRU if our fence is getting reused. */
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		reg = &dev_priv->fence_regs[obj->fence_reg];
		if (!obj->fence_dirty) {
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
			return 0;
		}
	} else if (enable) {
		reg = i915_find_fence_reg(dev);
		if (reg == NULL)
			return -EDEADLK;

		if (reg->obj) {
			struct drm_i915_gem_object *old = reg->obj;

			ret = i915_gem_object_wait_fence(old);
			if (ret)
				return ret;

			i915_gem_object_fence_lost(old);
		}
	} else
		return 0;

	i915_gem_object_update_fence(obj, reg, enable);
	obj->fence_dirty = false;

	return 0;
}

static bool i915_gem_valid_gtt_space(struct drm_device *dev,
				     struct drm_mm_node *gtt_space,
				     unsigned long cache_level)
{
	struct drm_mm_node *other;

	/* On non-LLC machines we have to be careful when putting differing
	 * types of snoopable memory together to avoid the prefetcher
	 * crossing memory domains and dying.
	 */
	if (HAS_LLC(dev))
		return true;

	if (gtt_space == NULL)
		return true;

	if (list_empty(&gtt_space->node_list))
		return true;

	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
	if (other->allocated && !other->hole_follows && other->color != cache_level)
		return false;

	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
		return false;

	return true;
}

static void i915_gem_verify_gtt(struct drm_device *dev)
{
#if WATCH_GTT
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int err = 0;

	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		if (obj->gtt_space == NULL) {
			printk(KERN_ERR "object found on GTT list with no space reserved\n");
			err++;
			continue;
		}

		if (obj->cache_level != obj->gtt_space->color) {
			printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
			       obj->gtt_space->start,
			       obj->gtt_space->start + obj->gtt_space->size,
			       obj->cache_level,
			       obj->gtt_space->color);
			err++;
			continue;
		}

		if (!i915_gem_valid_gtt_space(dev,
					      obj->gtt_space,
					      obj->cache_level)) {
			printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
			       obj->gtt_space->start,
			       obj->gtt_space->start + obj->gtt_space->size,
			       obj->cache_level);
			err++;
			continue;
		}
	}

	WARN_ON(err);
#endif
}

/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
			    unsigned alignment,
			    bool map_and_fenceable,
			    bool nonblocking)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_mm_node *node;
	u32 size, fence_size, fence_alignment, unfenced_alignment;
	bool mappable, fenceable;
	size_t gtt_max = map_and_fenceable ?
		dev_priv->gtt.mappable_end : dev_priv->gtt.total;
	int ret;

	fence_size = i915_gem_get_gtt_size(dev,
					   obj->base.size,
					   obj->tiling_mode);
	fence_alignment = i915_gem_get_gtt_alignment(dev,
						     obj->base.size,
						     obj->tiling_mode, true);
	unfenced_alignment =
		i915_gem_get_gtt_alignment(dev,
						    obj->base.size,
						    obj->tiling_mode, false);

	if (alignment == 0)
		alignment = map_and_fenceable ? fence_alignment :
						unfenced_alignment;
	if (map_and_fenceable && alignment & (fence_alignment - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

	size = map_and_fenceable ? fence_size : obj->base.size;

	/* If the object is bigger than the entire aperture, reject it early
	 * before evicting everything in a vain attempt to find space.
	 */
	if (obj->base.size > gtt_max) {
		DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%ld\n",
			  obj->base.size,
			  map_and_fenceable ? "mappable" : "total",
			  gtt_max);
		return -E2BIG;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		i915_gem_object_unpin_pages(obj);
		return -ENOMEM;
	}

search_free:
	ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
						  size, alignment,
						  obj->cache_level, 0, gtt_max);
	if (ret) {
		ret = i915_gem_evict_something(dev, size, alignment,
					       obj->cache_level,
					       map_and_fenceable,
					       nonblocking);
		if (ret == 0)
			goto search_free;

		i915_gem_object_unpin_pages(obj);
		kfree(node);
		return ret;
	}
	if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
		i915_gem_object_unpin_pages(obj);
		drm_mm_put_block(node);
		return -EINVAL;
	}

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret) {
		i915_gem_object_unpin_pages(obj);
		drm_mm_put_block(node);
		return ret;
	}

	list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	obj->gtt_space = node;
	obj->gtt_offset = node->start;

	fenceable =
		node->size == fence_size &&
		(node->start & (fence_alignment - 1)) == 0;

	mappable =
		obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;

	obj->map_and_fenceable = mappable && fenceable;

	i915_gem_object_unpin_pages(obj);
	trace_i915_gem_object_bind(obj, map_and_fenceable);
	i915_gem_verify_gtt(dev);
	return 0;
}

void
i915_gem_clflush_object(struct drm_i915_gem_object *obj)
{
	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj->pages == NULL)
		return;

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 */
	if (obj->stolen)
		return;

	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines.  However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated.  As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (obj->cache_level != I915_CACHE_NONE)
		return;

	trace_i915_gem_object_clflush(obj);

	drm_clflush_sg(obj->pages);
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain.  Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush.  It also doesn't land in render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 */
	wmb();

	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		return;

	i915_gem_clflush_object(obj);
	i915_gem_chipset_flush(obj->base.dev);

	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
	uint32_t old_write_domain, old_read_domains;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (obj->gtt_space == NULL)
		return -EINVAL;

	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	i915_gem_object_flush_cpu_write_domain(obj);

	/* Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * GTT domain upon first access.
	 */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		mb();

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
		obj->dirty = 1;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	/* And bump the LRU for this access */
	if (i915_gem_object_is_inactive(obj))
		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	return 0;
}

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (obj->cache_level == cache_level)
		return 0;

	if (obj->pin_count) {
		DRM_DEBUG("can not change the cache level of pinned objects\n");
		return -EBUSY;
	}

	if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
		ret = i915_gem_object_unbind(obj);
		if (ret)
			return ret;
	}

	if (obj->gtt_space) {
		ret = i915_gem_object_finish_gpu(obj);
		if (ret)
			return ret;

		i915_gem_object_finish_gtt(obj);

		/* Before SandyBridge, you could not use tiling or fence
		 * registers with snooped memory, so relinquish any fences
		 * currently pointing to our region in the aperture.
		 */
		if (INTEL_INFO(dev)->gen < 6) {
			ret = i915_gem_object_put_fence(obj);
			if (ret)
				return ret;
		}

		if (obj->has_global_gtt_mapping)
			i915_gem_gtt_bind_object(obj, cache_level);
		if (obj->has_aliasing_ppgtt_mapping)
			i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
					       obj, cache_level);

		obj->gtt_space->color = cache_level;
	}

	if (cache_level == I915_CACHE_NONE) {
		u32 old_read_domains, old_write_domain;

		/* If we're coming from LLC cached, then we haven't
		 * actually been tracking whether the data is in the
		 * CPU cache or not, since we only allow one bit set
		 * in obj->write_domain and have been skipping the clflushes.
		 * Just set it to the CPU cache for now.
		 */
		WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
		WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);

		old_read_domains = obj->base.read_domains;
		old_write_domain = obj->base.write_domain;

		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;

		trace_i915_gem_object_change_domain(obj,
						    old_read_domains,
						    old_write_domain);
	}

	obj->cache_level = cache_level;
	i915_gem_verify_gtt(dev);
	return 0;
}

int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	args->caching = obj->cache_level != I915_CACHE_NONE;

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret;

	switch (args->caching) {
	case I915_CACHING_NONE:
		level = I915_CACHE_NONE;
		break;
	case I915_CACHING_CACHED:
		level = I915_CACHE_LLC;
		break;
	default:
		return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = i915_gem_object_set_cache_level(obj, level);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/*
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
 */
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_ring_buffer *pipelined)
{
	u32 old_read_domains, old_write_domain;
	int ret;

	if (pipelined != obj->ring) {
		ret = i915_gem_object_sync(obj, pipelined);
		if (ret)
			return ret;
	}

	/* The display engine is not coherent with the LLC cache on gen6.  As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is the lowest common denominator for
	 * all chipsets.
	 *
	 * However for gen6+, we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
	if (ret)
		return ret;

	/* As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
	 * always use map_and_fenceable for all scanout buffers.
	 */
	ret = i915_gem_object_pin(obj, alignment, true, false);
	if (ret)
		return ret;

	i915_gem_object_flush_cpu_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	obj->base.write_domain = 0;
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}

int
i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
{
	int ret;

	if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	/* Ensure that we invalidate the GPU's caches and TLBs. */
	obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
	return 0;
}

/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return 0;

	ret = i915_gem_object_wait_rendering(obj, !write);
	if (ret)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj);

		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
	struct drm_i915_gem_request *request;
	struct intel_ring_buffer *ring = NULL;
	unsigned reset_counter;
	u32 seqno = 0;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
	if (ret)
		return ret;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		ring = request->ring;
		seqno = request->seqno;
	}
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	spin_unlock(&file_priv->mm.lock);

	if (seqno == 0)
		return 0;

	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
	if (ret == 0)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);

	return ret;
}

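/* Pin the object into the GTT at the requested alignment, rebinding it first
 * if its current placement does not satisfy the alignment or
 * map-and-fenceable constraints.
 */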
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
		    uint32_t alignment,
		    bool map_and_fenceable,
		    bool nonblocking)
{
	int ret;

	if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
		return -EBUSY;

	if (obj->gtt_space != NULL) {
		if ((alignment && obj->gtt_offset & (alignment - 1)) ||
		    (map_and_fenceable && !obj->map_and_fenceable)) {
			WARN(obj->pin_count,
			     "bo is already pinned with incorrect alignment:"
			     " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
			     " obj->map_and_fenceable=%d\n",
			     obj->gtt_offset, alignment,
			     map_and_fenceable,
			     obj->map_and_fenceable);
			ret = i915_gem_object_unbind(obj);
			if (ret)
				return ret;
		}
	}

	if (obj->gtt_space == NULL) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;

		ret = i915_gem_object_bind_to_gtt(obj, alignment,
						  map_and_fenceable,
						  nonblocking);
		if (ret)
			return ret;

		if (!dev_priv->mm.aliasing_ppgtt)
			i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

	if (!obj->has_global_gtt_mapping && map_and_fenceable)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	obj->pin_count++;
	obj->pin_mappable |= map_and_fenceable;

	return 0;
}

void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pin_count == 0);
	BUG_ON(obj->gtt_space == NULL);

	if (--obj->pin_count == 0)
		obj->pin_mappable = false;
}

int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to pin a purgeable buffer\n");
		ret = -EINVAL;
		goto out;
	}

	if (obj->pin_filp != NULL && obj->pin_filp != file) {
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}

	if (obj->user_pin_count == 0) {
		ret = i915_gem_object_pin(obj, args->alignment, true, false);
		if (ret)
			goto out;
	}

	obj->user_pin_count++;
	obj->pin_filp = file;

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	i915_gem_object_flush_cpu_write_domain(obj);
	args->offset = obj->gtt_offset;
out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->pin_filp != file) {
		DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}
	obj->user_pin_count--;
	if (obj->user_pin_count == 0) {
		obj->pin_filp = NULL;
		i915_gem_object_unpin(obj);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
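
/*
 * i915_gem_busy_ioctl - report whether a buffer object is still in use by
 * the GPU.  The low bit reflects obj->active; if the object is active on a
 * ring, that ring's flag is reported in the upper 16 bits of args->busy.
 */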

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Count all active objects as busy, even if they are currently not used
	 * by the gpu. Users of this interface expect objects to eventually
	 * become non-busy without any further actions, therefore emit any
	 * necessary flushes here.
	 */
	ret = i915_gem_object_flush_active(obj);

	args->busy = obj->active;
	if (obj->ring) {
		BUILD_BUG_ON(I915_NUM_RINGS > 16);
		args->busy |= intel_ring_flag(obj->ring) << 16;
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}
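
/*
 * i915_gem_madvise_ioctl - let userspace hint that a buffer's backing
 * storage may be discarded (I915_MADV_DONTNEED) or is needed again
 * (I915_MADV_WILLNEED).  Pinned objects are rejected; args->retained
 * reports whether the backing pages still exist.
 */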
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
	    break;
	default:
	    return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->pin_count) {
		ret = -EINVAL;
		goto out;
	}

	if (obj->madv != __I915_MADV_PURGED)
		obj->madv = args->madv;

	/* if the object is no longer attached, discard its backing storage */
	if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj->madv != __I915_MADV_PURGED;

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
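
/*
 * i915_gem_object_init - common initialisation for a freshly allocated GEM
 * object: list heads, the ops vtable, and default fence/madv state.
 */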
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
{
	INIT_LIST_HEAD(&obj->mm_list);
	INIT_LIST_HEAD(&obj->gtt_list);
	INIT_LIST_HEAD(&obj->ring_list);
	INIT_LIST_HEAD(&obj->exec_list);

	obj->ops = ops;

	obj->fence_reg = I915_FENCE_REG_NONE;
	obj->madv = I915_MADV_WILLNEED;
	/* Avoid an unnecessary call to unbind on the first bind. */
	obj->map_and_fenceable = true;

	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
}

static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,
};
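
/*
 * i915_gem_alloc_object - allocate a shmem-backed GEM object, constraining
 * the page allocation mask on platforms that cannot relocate objects above
 * 4GiB and picking an LLC cache level where the hardware supports it.
 */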
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size)
{
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	gfp_t mask;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		i915_gem_object_free(obj);
		return NULL;
	}

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = file_inode(obj->base.filp)->i_mapping;
	mapping_set_gfp_mask(mapping, mask);

	i915_gem_object_init(obj, &i915_gem_object_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(dev)) {
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached.  Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache.  This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		obj->cache_level = I915_CACHE_LLC;
	} else
		obj->cache_level = I915_CACHE_NONE;

	return obj;
}
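
/*
 * i915_gem_init_object - legacy GEM hook; i915 allocates everything through
 * i915_gem_alloc_object(), so this should never be reached.
 */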

int i915_gem_init_object(struct drm_gem_object *obj)
{
	BUG();

	return 0;
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	trace_i915_gem_object_destroy(obj);

	if (obj->phys_obj)
		i915_gem_detach_phys_object(dev, obj);

	obj->pin_count = 0;
	if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
		bool was_interruptible;

		was_interruptible = dev_priv->mm.interruptible;
		dev_priv->mm.interruptible = false;

		WARN_ON(i915_gem_object_unbind(obj));

		dev_priv->mm.interruptible = was_interruptible;
	}

	obj->pages_pin_count = 0;
	i915_gem_object_put_pages(obj);
	i915_gem_object_free_mmap_offset(obj);
	i915_gem_object_release_stolen(obj);

	BUG_ON(obj->pages);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	drm_gem_object_release(&obj->base);
	i915_gem_info_remove_obj(dev_priv, obj->base.size);

	kfree(obj->bit_17);
	i915_gem_object_free(obj);
}
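
/*
 * i915_gem_idle - quiesce the GPU for suspend or VT switch: wait for
 * outstanding work, retire requests, evict everything under UMS, reset the
 * fence registers and tear down the rings with mm.suspended set.
 */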
int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = i915_gpu_idle(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	i915_gem_retire_requests(dev);

	/* Under UMS, be paranoid and evict. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_gem_evict_everything(dev);

	i915_gem_reset_fences(dev);

	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 * And not confound mm.suspended!
	 */
	dev_priv->mm.suspended = 1;
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);

	i915_kernel_lost_context(dev);
	i915_gem_cleanup_ringbuffer(dev);

	mutex_unlock(&dev->struct_mutex);

	/* Cancel the retire work handler, which should be idle now. */
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);

	return 0;
}
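
/*
 * i915_gem_l3_remap - rewrite the L3 remapping registers from the saved
 * l3_parity.remap_info, with DOP clock gating disabled while the registers
 * are reprogrammed.
 */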
void i915_gem_l3_remap(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 misccpctl;
	int i;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	if (!dev_priv->l3_parity.remap_info)
		return;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
		u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
		if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
			DRM_DEBUG("0x%x was already programmed to %x\n",
				  GEN7_L3LOG_BASE + i, remap);
		if (remap && !dev_priv->l3_parity.remap_info[i/4])
			DRM_DEBUG_DRIVER("Clearing remapped register\n");
		I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
	}

	/* Make sure all the writes land before disabling dop clock gating */
	POSTING_READ(GEN7_L3LOG_BASE);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}
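
/*
 * i915_gem_init_swizzling - program the swizzling control bits
 * (DISP_ARB_CTL, TILECTL, ARB_MODE) on gen5+ when bit-6 swizzling is in
 * use, so tiled surfaces are addressed consistently.
 */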
void i915_gem_init_swizzling(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN5(dev))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN6(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN7(dev))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else
		BUG();
}

static bool
intel_enable_blt(struct drm_device *dev)
{
	if (!HAS_BLT(dev))
		return false;

	/* The blitter was dysfunctional on early prototypes */
	if (IS_GEN6(dev) && dev->pdev->revision < 8) {
		DRM_INFO("BLT not supported on this pre-production hardware;"
			 " graphics performance will be degraded.\n");
		return false;
	}

	return true;
}
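
/*
 * i915_gem_init_rings - bring up the command rings present on this device
 * (render, plus BSD/BLT/VEBOX where available), unwinding on failure, then
 * seed the initial seqno just below the u32 wrap point.
 */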
static int i915_gem_init_rings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_init_render_ring_buffer(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = intel_init_bsd_ring_buffer(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (intel_enable_blt(dev)) {
		ret = intel_init_blt_ring_buffer(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	if (HAS_VEBOX(dev)) {
		ret = intel_init_vebox_ring_buffer(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
	if (ret)
		goto cleanup_vebox_ring;

	return 0;

cleanup_vebox_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
cleanup_blt_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);

	return ret;
}

int
i915_gem_init_hw(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
		return -EIO;

	if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
		I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);

	if (HAS_PCH_NOP(dev)) {
		u32 temp = I915_READ(GEN7_MSG_CTL);
		temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
		I915_WRITE(GEN7_MSG_CTL, temp);
	}

	i915_gem_l3_remap(dev);

	i915_gem_init_swizzling(dev);

	ret = i915_gem_init_rings(dev);
	if (ret)
		return ret;

	/*
	 * XXX: There was some w/a described somewhere suggesting loading
	 * contexts before PPGTT.
	 */
	i915_gem_context_init(dev);
	if (dev_priv->mm.aliasing_ppgtt) {
		ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
		if (ret) {
			i915_gem_cleanup_aliasing_ppgtt(dev);
			DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
		}
	}

	return 0;
}
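
/*
 * i915_gem_init - one-time GEM initialisation at driver load: set up the
 * global GTT and bring up the hardware, with a Valleyview-specific wake
 * workaround applied first.
 */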
int i915_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev->struct_mutex);

	if (IS_VALLEYVIEW(dev)) {
		/* VLVA0 (potential hack), BIOS isn't actually waking us */
		I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
		if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
			DRM_DEBUG_DRIVER("allow wake ack timed out\n");
	}

	i915_gem_init_global_gtt(dev);

	ret = i915_gem_init_hw(dev);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		i915_gem_cleanup_aliasing_ppgtt(dev);
		return ret;
	}

	/* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->dri1.allow_batchbuffer = 1;
	return 0;
}

void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		intel_cleanup_ring_buffer(ring);
}

int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		atomic_set(&dev_priv->gpu_error.reset_counter, 0);
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->mm.suspended = 0;

	ret = i915_gem_init_hw(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	mutex_unlock(&dev->struct_mutex);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_ringbuffer;

	return 0;

cleanup_ringbuffer:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	dev_priv->mm.suspended = 1;
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	drm_irq_uninstall(dev);
	return i915_gem_idle(dev);
}

void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}

static void
init_ring_lists(struct intel_ring_buffer *ring)
{
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
}
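
/*
 * i915_gem_load - driver-load-time GEM setup: the object slab cache, the
 * object/request lists and workers, the number of fence registers for this
 * generation, and the memory shrinker.
 */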
void
i915_gem_load(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	dev_priv->slab =
		kmem_cache_create("i915_gem_object",
				  sizeof(struct drm_i915_gem_object), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	for (i = 0; i < I915_NUM_RINGS; i++)
		init_ring_lists(&dev_priv->ring[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	if (IS_GEN3(dev)) {
		I915_WRITE(MI_ARB_STATE,
			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
	}

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	i915_gem_reset_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&dev_priv->mm.inactive_shrinker);
}

/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
static int i915_gem_init_phys_object(struct drm_device *dev,
				     int id, int size, int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, align);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	kfree(phys_obj);
	return ret;
}

static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj) {
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
	}

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}

void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	char *vaddr;
	int i;
	int page_count;

	if (!obj->phys_obj)
		return;
	vaddr = obj->phys_obj->handle->vaddr;

	page_count = obj->base.size / PAGE_SIZE;
	for (i = 0; i < page_count; i++) {
		struct page *page = shmem_read_mapping_page(mapping, i);
		if (!IS_ERR(page)) {
			char *dst = kmap_atomic(page);
			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
			kunmap_atomic(dst);

			drm_clflush_pages(&page, 1);

			set_page_dirty(page);
			mark_page_accessed(page);
			page_cache_release(page);
		}
	}
	i915_gem_chipset_flush(dev);

	obj->phys_obj->cur_obj = NULL;
	obj->phys_obj = NULL;
}
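
/*
 * i915_gem_attach_phys_object - back an object with a contiguous DMA
 * allocation, copying the current shmem pages into it.  The counterpart,
 * i915_gem_detach_phys_object(), copies the data back out.
 */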

int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_i915_gem_object *obj,
			    int id,
			    int align)
{
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	if (obj->phys_obj) {
		if (obj->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->base.size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n",
				  id, obj->base.size);
			return ret;
		}
	}

	/* bind to the object */
	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj->phys_obj->cur_obj = obj;

	page_count = obj->base.size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		struct page *page;
		char *dst, *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src);

		mark_page_accessed(page);
		page_cache_release(page);
	}

	return 0;
}

static int
i915_gem_phys_pwrite(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
	char __user *user_data = to_user_ptr(args->data_ptr);

	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten)
			return -EFAULT;
	}

	i915_gem_chipset_flush(dev);
	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}
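
/*
 * i915_gem_inactive_shrink - shrinker callback invoked under memory
 * pressure: purge and unbind objects as requested and return the number of
 * reclaimable pages.  Tolerates being called while struct_mutex is already
 * held by the current task (lock stealing), in which case it must not
 * unlock the mutex on the way out.
 */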
static int
i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker,
			     struct drm_i915_private,
			     mm.inactive_shrinker);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj;
	int nr_to_scan = sc->nr_to_scan;
	bool unlock = true;
	int cnt;

	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return 0;

		if (dev_priv->mm.shrinker_no_lock_stealing)
			return 0;

		unlock = false;
	}

	if (nr_to_scan) {
		nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
		if (nr_to_scan > 0)
			nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
							false);
		if (nr_to_scan > 0)
			i915_gem_shrink_all(dev_priv);
	}

	cnt = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
		if (obj->pages_pin_count == 0)
			cnt += obj->base.size >> PAGE_SHIFT;
	list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
			cnt += obj->base.size >> PAGE_SHIFT;

	if (unlock)
		mutex_unlock(&dev->struct_mutex);
	return cnt;
}