i915_gem.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/intel-gtt.h>

static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);

static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
						  bool pipelined);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
						     uint64_t offset,
						     uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
					  bool interruptible);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
					   unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file_priv);
static void i915_gem_free_object_tail(struct drm_gem_object *obj);

static int
i915_gem_object_get_pages(struct drm_gem_object *obj,
			  gfp_t gfpmask);

static void
i915_gem_object_put_pages(struct drm_gem_object *obj);

static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
}

static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.gtt_count++;
	dev_priv->mm.gtt_memory += size;
}

static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.gtt_count--;
	dev_priv->mm.gtt_memory -= size;
}

static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.pin_count++;
	dev_priv->mm.pin_memory += size;
}

static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.pin_count--;
	dev_priv->mm.pin_memory -= size;
}

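/* If a GPU reset is pending, wait for the error handling to complete and
 * report whether the reset succeeded (0) or the GPU is still wedged (-EIO).
 */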
int
i915_gem_check_is_wedged(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct completion *x = &dev_priv->error_completion;
	unsigned long flags;
	int ret;

	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	ret = wait_for_completion_interruptible(x);
	if (ret)
		return ret;

	/* Success, we reset the GPU! */
	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	/* GPU is hung, bump the completion count to account for
	 * the token we just consumed so that we never hit zero and
	 * end up waiting upon a subsequent completion event that
	 * will never happen.
	 */
	spin_lock_irqsave(&x->wait.lock, flags);
	x->done++;
	spin_unlock_irqrestore(&x->wait.lock, flags);
	return -EIO;
}

static int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_check_is_wedged(dev);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (atomic_read(&dev_priv->mm.wedged)) {
		mutex_unlock(&dev->struct_mutex);
		return -EAGAIN;
	}

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

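/* An object is "inactive" when it is bound into the GTT but not in use by
 * the GPU and not pinned.
 */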
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
{
	return obj_priv->gtt_space &&
		!obj_priv->active &&
		obj_priv->pin_count == 0;
}

int i915_gem_do_init(struct drm_device *dev,
		     unsigned long start,
		     unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (start >= end ||
	    (start & (PAGE_SIZE - 1)) != 0 ||
	    (end & (PAGE_SIZE - 1)) != 0) {
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev_priv->mm.gtt_total = end - start;

	return 0;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_init *args = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	args->aper_size = dev_priv->mm.gtt_total;
	args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}


/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		i915_gem_info_remove_obj(dev->dev_private, obj->size);
		kfree(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference(obj);
	trace_i915_gem_object_create(obj);

	args->handle = handle;
	return 0;
}

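/* Copy from a shmem-backed page into user space via kmap_atomic; returns
 * non-zero if the copy would fault, in which case the caller falls back to
 * the slow path.
 */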
static inline int
fast_shmem_read(struct page **pages,
		loff_t page_base, int page_offset,
		char __user *data,
		int length)
{
	char *vaddr;
	int ret;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
	ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
	kunmap_atomic(vaddr);

	return ret;
}

static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj_priv->tiling_mode != I915_TILING_NONE;
}

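/* Copy between two shmem pages using kmap; only used on the slow paths,
 * where sleeping is allowed.
 */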
static inline void
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap(dst_page);
	src_vaddr = kmap(src_page);

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap(src_page);
	kunmap(dst_page);
}

static inline void
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap(gpu_page);
	cpu_vaddr = kmap(cpu_page);

	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}
		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap(cpu_page);
	kunmap(gpu_page);
}

/**
 * This is the fast shmem pread path, which attempts to copy the data
 * directly from the backing pages of the object into the user's address
 * space.  On a fault, it fails so we can fall back to
 * i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		if (fast_shmem_read(obj_priv->pages,
				    page_base, page_offset,
				    user_data, page_length))
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

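/* Get the object's backing pages, evicting something from the GTT and
 * retrying once if the first attempt runs out of memory.
 */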
static int
i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);

	/* If we've insufficient memory to map in the pages, attempt
	 * to make some space by throwing out some old buffers.
	 */
	if (ret == -ENOMEM) {
		struct drm_device *dev = obj->dev;

		ret = i915_gem_evict_something(dev, obj->size,
					       i915_gem_get_gtt_alignment(obj));
		if (ret)
			return ret;

		ret = i915_gem_object_get_pages(obj, 0);
	}

	return ret;
}

/**
 * This is the fallback shmem pread path, which pins the user pages with
 * get_user_pages() so that we can copy out of the object's backing pages
 * while holding the struct_mutex without taking page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index,  data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_set_cpu_read_domain_range(obj,
							args->offset,
							args->size);
	if (ret)
		goto out;

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	obj_priv = to_intel_bo(obj);
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      1);
		} else {
			slow_shmem_copy(user_pages[data_page_index],
					data_page_offset,
					obj_priv->pages[shmem_page_index],
					shmem_page_offset,
					page_length);
		}

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

out:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	drm_free_large(user_pages);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	obj_priv = to_intel_bo(obj);

	/* Bounds check source.  */
	if (args->offset > obj->size || args->size > obj->size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	if (args->size == 0)
		goto out;

	if (!access_ok(VERIFY_WRITE,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size)) {
		ret = -EFAULT;
		goto out;
	}

	ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
				       args->size);
	if (ret) {
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_get_pages_or_evict(obj);
	if (ret)
		goto out;

	ret = i915_gem_object_set_cpu_read_domain_range(obj,
							args->offset,
							args->size);
	if (ret)
		goto out_put;

	ret = -EFAULT;
	if (!i915_gem_object_needs_bit17_swizzle(obj))
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
	if (ret == -EFAULT)
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);

out_put:
	i915_gem_object_put_pages(obj);
out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline void
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char __iomem *dst_vaddr;
	char *src_vaddr;

	dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
	src_vaddr = kmap(user_page);

	memcpy_toio(dst_vaddr + gtt_offset,
		    src_vaddr + user_offset,
		    length);

	kunmap(user_page);
	io_mapping_unmap(dst_vaddr);
}

static inline int
fast_shmem_write(struct page **pages,
		 loff_t page_base, int page_offset,
		 char __user *data,
		 int length)
{
	char *vaddr;
	int ret;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
	ret = __copy_from_user_inatomic(vaddr + page_offset, data, length);
	kunmap_atomic(vaddr);

	return ret;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_priv = to_intel_bo(obj);
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				    page_offset, user_data, page_length))

			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the source memory and maps the destination through the GTT with
 * io_mapping_map_wc for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto out_unpin_pages;

	obj_priv = to_intel_bo(obj);
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		slow_kernel_write(dev_priv->mm.gtt_mapping,
				  gtt_page_base, gtt_page_offset,
				  user_pages[data_page_index],
				  data_page_offset,
				  page_length);

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_priv = to_intel_bo(obj);
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		if (fast_shmem_write(obj_priv->pages,
				       page_base, page_offset,
				       user_data, page_length))
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it with kmap for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index,  data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

903
	mutex_unlock(&dev->struct_mutex);
904 905 906 907
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
908
	mutex_lock(&dev->struct_mutex);
909 910
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
911
		goto out;
912 913
	}

914
	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
915
	if (ret)
916
		goto out;
917

918
	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
919

920
	obj_priv = to_intel_bo(obj);
921
	offset = args->offset;
922
	obj_priv->dirty = 1;
923

924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943
	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      0);
		} else {
			slow_shmem_copy(obj_priv->pages[shmem_page_index],
					shmem_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);
957
		}
958 959 960 961

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
962 963
	}

964
out:
965 966
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
967
	drm_free_large(user_pages);
968

969
	return ret;
970 971 972 973 974 975 976 977 978
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
979
		      struct drm_file *file)
980 981 982 983 984 985
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

986
	ret = i915_mutex_lock_interruptible(dev);
987
	if (ret)
988
		return ret;
989 990 991 992 993

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
994
	}
995
	obj_priv = to_intel_bo(obj);
996

997

998 999
	/* Bounds check destination. */
	if (args->offset > obj->size || args->size > obj->size - args->offset) {
C
1001
		goto out;
C

1004 1005 1006
	if (args->size == 0)
		goto out;

C
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size)) {
		ret = -EFAULT;
1011
		goto out;
1012 1013
	}

1014 1015 1016 1017 1018
	ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
				      args->size);
	if (ret) {
		ret = -EFAULT;
		goto out;
1019 1020 1021 1022 1023 1024 1025 1026
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
1027
	if (obj_priv->phys_obj)
1028
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
1029
	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
1030
		 obj_priv->gtt_space &&
1031
		 obj->write_domain != I915_GEM_DOMAIN_CPU) {
1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045
		ret = i915_gem_object_pin(obj, 0);
		if (ret)
			goto out;

		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
		if (ret)
			goto out_unpin;

		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		if (ret == -EFAULT)
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);

out_unpin:
		i915_gem_object_unpin(obj);
1046
	} else {
1047 1048 1049
		ret = i915_gem_object_get_pages_or_evict(obj);
		if (ret)
			goto out;
1050

1051 1052 1053
		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
		if (ret)
			goto out_put;
1054

1055 1056 1057 1058 1059 1060 1061 1062 1063
		ret = -EFAULT;
		if (!i915_gem_object_needs_bit17_swizzle(obj))
			ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
		if (ret == -EFAULT)
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);

out_put:
		i915_gem_object_put_pages(obj);
	}
1064

1065
out:
1066
	drm_gem_object_unreference(obj);
1067
unlock:
1068
	mutex_unlock(&dev->struct_mutex);
1069 1070 1071 1072
	return ret;
}

/**
1073 1074
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
1075 1076 1077 1078 1079
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
1080
	struct drm_i915_private *dev_priv = dev->dev_private;
1081 1082
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *obj;
1083
	struct drm_i915_gem_object *obj_priv;
1084 1085
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
1086 1087 1088 1089 1090
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	obj_priv = to_intel_bo(obj);

	intel_mark_busy(dev, obj);

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Update the LRU on the fence for the CPU access that's
		 * about to occur.
		 */
		if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
			struct drm_i915_fence_reg *reg =
				&dev_priv->fence_regs[obj_priv->fence_reg];
			list_move_tail(&reg->lru_list,
				       &dev_priv->mm.fence_list);
		}

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	/* Maintain LRU order of "inactive" objects */
	if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);

	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *obj;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (to_intel_bo(obj)->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	/* Now bind it into the GTT if needed */
	mutex_lock(&dev->struct_mutex);
	if (!obj_priv->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0);
		if (ret)
			goto unlock;

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret)
			goto unlock;
	}

	/* Need a new fence register? */
	if (obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj, true);
		if (ret)
			goto unlock;
	}

	if (i915_gem_object_is_inactive(obj_priv))
		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);

	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
	case -EAGAIN:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, 0);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOSPC;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	/* By now we should be all set, any drm_mmap request on the offset
	 * below will get to our mmap & fault handler */
	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);

	return ret;
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, than pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	if (dev->dev_mapping)
		unmap_mapping_range(dev->dev_mapping,
				    obj_priv->mmap_offset, obj->size, 1);
}

static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;

	list = &obj->map_list;
	drm_ht_remove_item(&mm->offset_hash, &list->hash);

	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	if (list->map) {
		kfree(list->map);
		list->map = NULL;
	}

	obj_priv->mmap_offset = 0;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping if needed.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int start, i;

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	if (INTEL_INFO(dev)->gen == 3)
		start = 1024*1024;
	else
		start = 512*1024;

	for (i = start; i < obj->size; i <<= 1)
		;

	return i;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file_priv: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap_gtt *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	obj_priv = to_intel_bo(obj);

	if (obj_priv->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
		ret = -EINVAL;
		goto out;
	}

	if (!obj_priv->mmap_offset) {
		ret = i915_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	args->offset = obj_priv->mmap_offset;

	/*
	 * Pull it into the GTT so that we have a page list (makes the
	 * initial fault faster and any subsequent flushing possible).
	 */
	if (!obj_priv->agp_mem) {
		ret = i915_gem_object_bind_to_gtt(obj, 0);
		if (ret)
			goto out;
	}

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

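/* Drop a reference on the object's backing pages; on the last reference,
 * write back dirty pages (saving bit-17 swizzle state for tiled objects)
 * and release them to the page cache.
 */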
static void
i915_gem_object_put_pages(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int page_count = obj->size / PAGE_SIZE;
	int i;

	BUG_ON(obj_priv->pages_refcount == 0);
	BUG_ON(obj_priv->madv == __I915_MADV_PURGED);

	if (--obj_priv->pages_refcount != 0)
		return;

	if (obj_priv->tiling_mode != I915_TILING_NONE)
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj_priv->madv == I915_MADV_DONTNEED)
		obj_priv->dirty = 0;

	for (i = 0; i < page_count; i++) {
		if (obj_priv->dirty)
			set_page_dirty(obj_priv->pages[i]);

		if (obj_priv->madv == I915_MADV_WILLNEED)
			mark_page_accessed(obj_priv->pages[i]);

		page_cache_release(obj_priv->pages[i]);
	}
	obj_priv->dirty = 0;

	drm_free_large(obj_priv->pages);
	obj_priv->pages = NULL;
}

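/* Reserve the seqno that the next request will use.  The request itself is
 * emitted lazily, by a later call to i915_add_request().
 */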
static uint32_t
i915_gem_next_request_seqno(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	ring->outstanding_lazy_request = true;
	return dev_priv->next_seqno;
}

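/* Mark the object as in use by the GPU on the given ring: take a reference
 * the first time it becomes active, move it to the tail of the active lists
 * and record the seqno of the request that uses it.
 */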
static void
i915_gem_object_move_to_active(struct drm_gem_object *obj,
			       struct intel_ring_buffer *ring)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	uint32_t seqno = i915_gem_next_request_seqno(dev, ring);

	BUG_ON(ring == NULL);
	obj_priv->ring = ring;

	/* Add a reference if we're newly entering the active list. */
	if (!obj_priv->active) {
		drm_gem_object_reference(obj);
		obj_priv->active = 1;
	}

	/* Move from whatever list we were on to the tail of execution. */
	list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list);
	list_move_tail(&obj_priv->ring_list, &ring->active_list);
	obj_priv->last_rendering_seqno = seqno;
}

static void
i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	BUG_ON(!obj_priv->active);
	list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list);
	list_del_init(&obj_priv->ring_list);
	obj_priv->last_rendering_seqno = 0;
}

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct inode *inode;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*. Here we mirror the actions taken
	 * when by shmem_delete_inode() to release the backing store.
	 */
	inode = obj->filp->f_path.dentry->d_inode;
	truncate_inode_pages(inode->i_mapping, 0);
	if (inode->i_op->truncate_range)
		inode->i_op->truncate_range(inode, 0, (loff_t)-1);

	obj_priv->madv = __I915_MADV_PURGED;
}

static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
{
	return obj_priv->madv == I915_MADV_DONTNEED;
}

static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	if (obj_priv->pin_count != 0)
		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
	else
		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
	list_del_init(&obj_priv->ring_list);

	BUG_ON(!list_empty(&obj_priv->gpu_write_list));

	obj_priv->last_rendering_seqno = 0;
	obj_priv->ring = NULL;
	if (obj_priv->active) {
		obj_priv->active = 0;
		drm_gem_object_unreference(obj);
	}
	WARN_ON(i915_verify_lists(dev));
}

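/* For each object with a pending GPU write covered by flush_domains, clear
 * its write domain, move it onto the ring's active list and update the
 * fence LRU.
 */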
static void
i915_gem_process_flushing_list(struct drm_device *dev,
			       uint32_t flush_domains,
			       struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv, *next;

	list_for_each_entry_safe(obj_priv, next,
				 &ring->gpu_write_list,
				 gpu_write_list) {
		struct drm_gem_object *obj = &obj_priv->base;

		if (obj->write_domain & flush_domains) {
			uint32_t old_write_domain = obj->write_domain;

			obj->write_domain = 0;
			list_del_init(&obj_priv->gpu_write_list);
			i915_gem_object_move_to_active(obj, ring);

			/* update the fence lru list */
			if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
				struct drm_i915_fence_reg *reg =
					&dev_priv->fence_regs[obj_priv->fence_reg];
				list_move_tail(&reg->lru_list,
						&dev_priv->mm.fence_list);
			}

			trace_i915_gem_object_change_domain(obj,
							    obj->read_domains,
							    old_write_domain);
		}
	}
}

uint32_t
i915_add_request(struct drm_device *dev,
		 struct drm_file *file,
		 struct drm_i915_gem_request *request,
		 struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = NULL;
	uint32_t seqno;
	int was_empty;

	if (file != NULL)
		file_priv = file->driver_priv;

	if (request == NULL) {
		request = kzalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return 0;
	}

	seqno = ring->add_request(dev, ring, 0);
	ring->outstanding_lazy_request = false;

	request->seqno = seqno;
	request->ring = ring;
	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&ring->request_list);
	list_add_tail(&request->list, &ring->request_list);

	if (file_priv) {
		spin_lock(&file_priv->mm.lock);
		request->file_priv = file_priv;
		list_add_tail(&request->client_list,
			      &file_priv->mm.request_list);
		spin_unlock(&file_priv->mm.lock);
	}

	if (!dev_priv->mm.suspended) {
		mod_timer(&dev_priv->hangcheck_timer,
			  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
		if (was_empty)
			queue_delayed_work(dev_priv->wq,
					   &dev_priv->mm.retire_work, HZ);
	}
	return seqno;
}

/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static void
i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
{
	uint32_t flush_domains = 0;

	/* The sampler always gets flushed on i965 (sigh) */
	if (INTEL_INFO(dev)->gen >= 4)
		flush_domains |= I915_GEM_DOMAIN_SAMPLER;

	ring->flush(dev, ring,
			I915_GEM_DOMAIN_COMMAND, flush_domains);
}

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	list_del(&request->client_list);
	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
}

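/* Throw away all outstanding requests on a ring and move its active objects
 * to the inactive list; used when the GPU has been reset.
 */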
static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
				      struct intel_ring_buffer *ring)
{
	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		list_del(&request->list);
		i915_gem_request_remove_from_client(request);
		kfree(request);
	}

	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&ring->active_list,
					    struct drm_i915_gem_object,
					    ring_list);

		obj_priv->base.write_domain = 0;
		list_del_init(&obj_priv->gpu_write_list);
		i915_gem_object_move_to_inactive(&obj_priv->base);
	}
}

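/* Clean up GEM state after a GPU reset: empty each ring, move objects off
 * the flushing list, drop stale GPU read domains and clear the fence
 * registers.
 */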
void i915_gem_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	int i;

	i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
	i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
	i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);

	/* Remove anything from the flushing lists. The GPU cache is likely
	 * to be lost on reset along with the data, so simply move the
	 * lost bo to the inactive list.
	 */
	while (!list_empty(&dev_priv->mm.flushing_list)) {
		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
					    struct drm_i915_gem_object,
					    mm_list);

		obj_priv->base.write_domain = 0;
		list_del_init(&obj_priv->gpu_write_list);
		i915_gem_object_move_to_inactive(&obj_priv->base);
	}

	/* Move everything out of the GPU domains to ensure we do any
	 * necessary invalidation upon reuse.
	 */
1821 1822
	list_for_each_entry(obj_priv,
			    &dev_priv->mm.inactive_list,
1823
			    mm_list)
1824 1825 1826
	{
		obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
	}
1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837

	/* The fence registers are invalidated so clear them out */
	for (i = 0; i < 16; i++) {
		struct drm_i915_fence_reg *reg;

		reg = &dev_priv->fence_regs[i];
		if (!reg->obj)
			continue;

		i915_gem_clear_fence_reg(reg->obj);
	}
1838 1839 1840 1841 1842
}

/**
 * This function clears the request list as sequence numbers are passed.
 */
static void
i915_gem_retire_requests_ring(struct drm_device *dev,
			      struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;

	if (!ring->status_page.page_addr ||
	    list_empty(&ring->request_list))
		return;

	WARN_ON(i915_verify_lists(dev));

	seqno = ring->get_seqno(dev, ring);
	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		if (!i915_seqno_passed(seqno, request->seqno))
			break;

		trace_i915_gem_request_retire(dev, request->seqno);

		list_del(&request->list);
		i915_gem_request_remove_from_client(request);
		kfree(request);
	}

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	while (!list_empty(&ring->active_list)) {
		struct drm_gem_object *obj;
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&ring->active_list,
					    struct drm_i915_gem_object,
					    ring_list);

		if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
			break;

		obj = &obj_priv->base;
		if (obj->write_domain != 0)
			i915_gem_object_move_to_flushing(obj);
		else
			i915_gem_object_move_to_inactive(obj);
	}

	if (unlikely (dev_priv->trace_irq_seqno &&
		      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
		ring->user_irq_put(dev, ring);
		dev_priv->trace_irq_seqno = 0;
	}

	WARN_ON(i915_verify_lists(dev));
}

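/* Retire completed requests on all rings, first freeing any objects whose
 * destruction was deferred to avoid recursing from unbind back into retire.
 */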
void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!list_empty(&dev_priv->mm.deferred_free_list)) {
	    struct drm_i915_gem_object *obj_priv, *tmp;

	    /* We must be careful that during unbind() we do not
	     * accidentally infinitely recurse into retire requests.
	     * Currently:
	     *   retire -> free -> unbind -> wait -> retire_ring
	     */
	    list_for_each_entry_safe(obj_priv, tmp,
				     &dev_priv->mm.deferred_free_list,
				     mm_list)
		    i915_gem_free_object_tail(&obj_priv->base);
	}

	i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
	i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
	i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
}

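/* Periodic work handler: retire completed requests and re-arm itself while
 * any ring still has outstanding requests.
 */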
static void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	/* Come back later if the device is busy... */
	if (!mutex_trylock(&dev->struct_mutex)) {
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
		return;
	}

	i915_gem_retire_requests(dev);

	if (!dev_priv->mm.suspended &&
		(!list_empty(&dev_priv->render_ring.request_list) ||
		 !list_empty(&dev_priv->bsd_ring.request_list) ||
		 !list_empty(&dev_priv->blt_ring.request_list)))
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}

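/* Wait for @seqno to pass on @ring, emitting any outstanding lazy request
 * first.  Returns 0 on success, -EAGAIN if the GPU is wedged, or the error
 * returned by the (optionally interruptible) wait.
 */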
int
i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
		     bool interruptible, struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 ier;
	int ret = 0;

	BUG_ON(seqno == 0);

	if (atomic_read(&dev_priv->mm.wedged))
		return -EAGAIN;

	if (ring->outstanding_lazy_request) {
		seqno = i915_add_request(dev, NULL, NULL, ring);
		if (seqno == 0)
			return -ENOMEM;
	}
	BUG_ON(seqno == dev_priv->next_seqno);

	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
		if (HAS_PCH_SPLIT(dev))
			ier = I915_READ(DEIER) | I915_READ(GTIER);
		else
			ier = I915_READ(IER);
		if (!ier) {
			DRM_ERROR("something (likely vbetool) disabled "
				  "interrupts, re-enabling\n");
			i915_driver_irq_preinstall(dev);
			i915_driver_irq_postinstall(dev);
		}

		trace_i915_gem_request_wait_begin(dev, seqno);

		ring->waiting_gem_seqno = seqno;
		ring->user_irq_get(dev, ring);
		if (interruptible)
			ret = wait_event_interruptible(ring->irq_queue,
				i915_seqno_passed(
					ring->get_seqno(dev, ring), seqno)
				|| atomic_read(&dev_priv->mm.wedged));
		else
			wait_event(ring->irq_queue,
				i915_seqno_passed(
					ring->get_seqno(dev, ring), seqno)
				|| atomic_read(&dev_priv->mm.wedged));

		ring->user_irq_put(dev, ring);
		ring->waiting_gem_seqno = 0;

		trace_i915_gem_request_wait_end(dev, seqno);
	}
	if (atomic_read(&dev_priv->mm.wedged))
		ret = -EAGAIN;

	if (ret && ret != -ERESTARTSYS)
		DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
			  __func__, ret, seqno, ring->get_seqno(dev, ring),
			  dev_priv->next_seqno);

	/* Directly dispatch request retiring.  While we have the work queue
	 * to handle this, the waiter on a request often wants an associated
	 * buffer to have made it to the inactive list, and we would need
	 * a separate wait queue to handle that.
	 */
	if (ret == 0)
		i915_gem_retire_requests_ring(dev, ring);

	return ret;
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno,
		  struct intel_ring_buffer *ring)
{
	return i915_do_wait_request(dev, seqno, 1, ring);
}

static void
i915_gem_flush_ring(struct drm_device *dev,
		    struct drm_file *file_priv,
		    struct intel_ring_buffer *ring,
		    uint32_t invalidate_domains,
		    uint32_t flush_domains)
{
	ring->flush(dev, ring, invalidate_domains, flush_domains);
	i915_gem_process_flushing_list(dev, flush_domains, ring);
}

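/* Flush the chipset cache for CPU writes and emit a flush on each ring
 * selected in @flush_rings for the given invalidate/flush domain masks.
 */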
static void
i915_gem_flush(struct drm_device *dev,
	       struct drm_file *file_priv,
	       uint32_t invalidate_domains,
	       uint32_t flush_domains,
	       uint32_t flush_rings)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		drm_agp_chipset_flush(dev);

	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
		if (flush_rings & RING_RENDER)
			i915_gem_flush_ring(dev, file_priv,
					    &dev_priv->render_ring,
					    invalidate_domains, flush_domains);
		if (flush_rings & RING_BSD)
			i915_gem_flush_ring(dev, file_priv,
					    &dev_priv->bsd_ring,
					    invalidate_domains, flush_domains);
		if (flush_rings & RING_BLT)
			i915_gem_flush_ring(dev, file_priv,
					    &dev_priv->blt_ring,
					    invalidate_domains, flush_domains);
	}
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj,
			       bool interruptible)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int ret;

	/* This function only exists to support waiting for existing rendering,
	 * not for emitting required flushes.
	 */
	BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);

	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
	if (obj_priv->active) {
		ret = i915_do_wait_request(dev,
					   obj_priv->last_rendering_seqno,
					   interruptible,
					   obj_priv->ring);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * Unbinds an object from the GTT aperture.
 */
int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int ret = 0;

	if (obj_priv->gtt_space == NULL)
		return 0;

	if (obj_priv->pin_count != 0) {
		DRM_ERROR("Attempting to unbind pinned buffer\n");
		return -EINVAL;
	}

	/* blow away mappings if mapped through GTT */
	i915_gem_release_mmap(obj);

	/* Move the object to the CPU domain to ensure that
	 * any possible CPU writes while it's not in the GTT
	 * are flushed when we go to remap it. This will
	 * also ensure that all pending GPU writes are finished
	 * before we unbind.
	 */
	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret == -ERESTARTSYS)
		return ret;
	/* Continue on if we fail due to EIO, the GPU is hung so we
	 * should be safe and we need to cleanup or else we might
	 * cause memory corruption through use-after-free.
	 */
	if (ret) {
		i915_gem_clflush_object(obj);
		obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
	}

	/* release the fence reg _after_ flushing */
	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
		i915_gem_clear_fence_reg(obj);

	drm_unbind_agp(obj_priv->agp_mem);
	drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);

	i915_gem_object_put_pages(obj);
	BUG_ON(obj_priv->pages_refcount);

	i915_gem_info_remove_gtt(dev_priv, obj->size);
	list_del_init(&obj_priv->mm_list);

	drm_mm_put_block(obj_priv->gtt_space);
	obj_priv->gtt_space = NULL;
	obj_priv->gtt_offset = 0;

	if (i915_gem_object_is_purgeable(obj_priv))
		i915_gem_object_truncate(obj);

	trace_i915_gem_object_unbind(obj);

	return ret;
}

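/* Flush any pending GPU writes on @ring and wait for the ring to drain. */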
static int i915_ring_idle(struct drm_device *dev,
			  struct intel_ring_buffer *ring)
{
	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
		return 0;

	i915_gem_flush_ring(dev, NULL, ring,
			    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	return i915_wait_request(dev,
				 i915_gem_next_request_seqno(dev, ring),
				 ring);
}

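/* Wait for every ring to go idle, flushing all objects onto the inactive list. */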
int
i915_gpu_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool lists_empty;
	int ret;

	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->mm.active_list));
	if (lists_empty)
		return 0;

	/* Flush everything onto the inactive list. */
	ret = i915_ring_idle(dev, &dev_priv->render_ring);
	if (ret)
		return ret;

	ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
	if (ret)
		return ret;

	ret = i915_ring_idle(dev, &dev_priv->blt_ring);
	if (ret)
		return ret;

	return 0;
}

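/* Pin the object's backing pages from shmem, taking a reference on the page
 * list; they remain pinned until i915_gem_object_put_pages() drops the last
 * reference.
 */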
static int
i915_gem_object_get_pages(struct drm_gem_object *obj,
			  gfp_t gfpmask)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int page_count, i;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;

	BUG_ON(obj_priv->pages_refcount
			== DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);

	if (obj_priv->pages_refcount++ != 0)
		return 0;

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->size / PAGE_SIZE;
	BUG_ON(obj_priv->pages != NULL);
	obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
	if (obj_priv->pages == NULL) {
		obj_priv->pages_refcount--;
		return -ENOMEM;
	}

	inode = obj->filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	for (i = 0; i < page_count; i++) {
		page = read_cache_page_gfp(mapping, i,
					   GFP_HIGHUSER |
					   __GFP_COLD |
					   __GFP_RECLAIMABLE |
					   gfpmask);
		if (IS_ERR(page))
			goto err_pages;

		obj_priv->pages[i] = page;
	}

	if (obj_priv->tiling_mode != I915_TILING_NONE)
		i915_gem_object_do_bit_17_swizzle(obj);

	return 0;

err_pages:
	while (i--)
		page_cache_release(obj_priv->pages[i]);

	drm_free_large(obj_priv->pages);
	obj_priv->pages = NULL;
	obj_priv->pages_refcount--;
	return PTR_ERR(page);
}

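/*
 * The *_write_fence_reg() helpers below encode an object's GTT offset, size,
 * pitch and tiling mode into the generation-specific fence register format.
 * As an illustration of the pre-965 encoding used further down: an X-tiled
 * object with a 2048-byte stride and 512-byte tiles stores
 * ffs(2048 / 512) - 1 = 2 in the pitch field.
 */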
static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int regnum = obj_priv->fence_reg;
	uint64_t val;

	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
		    0xfffff000) << 32;
	val |= obj_priv->gtt_offset & 0xfffff000;
	val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
		SANDYBRIDGE_FENCE_PITCH_SHIFT;

	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
	val |= I965_FENCE_REG_VALID;

	I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
}

static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int regnum = obj_priv->fence_reg;
	uint64_t val;

	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
		    0xfffff000) << 32;
	val |= obj_priv->gtt_offset & 0xfffff000;
	val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
	val |= I965_FENCE_REG_VALID;

	I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
}

static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int regnum = obj_priv->fence_reg;
	int tile_width;
	uint32_t fence_reg, val;
	uint32_t pitch_val;

	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
	    (obj_priv->gtt_offset & (obj->size - 1))) {
		WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
		     __func__, obj_priv->gtt_offset, obj->size);
		return;
	}

	if (obj_priv->tiling_mode == I915_TILING_Y &&
	    HAS_128_BYTE_Y_TILING(dev))
		tile_width = 128;
	else
		tile_width = 512;

	/* Note: pitch better be a power of two tile widths */
	pitch_val = obj_priv->stride / tile_width;
	pitch_val = ffs(pitch_val) - 1;

	if (obj_priv->tiling_mode == I915_TILING_Y &&
	    HAS_128_BYTE_Y_TILING(dev))
		WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
	else
		WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);

	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	val |= I915_FENCE_SIZE_BITS(obj->size);
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	if (regnum < 8)
		fence_reg = FENCE_REG_830_0 + (regnum * 4);
	else
		fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
	I915_WRITE(fence_reg, val);
}

static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int regnum = obj_priv->fence_reg;
	uint32_t val;
	uint32_t pitch_val;
	uint32_t fence_size_bits;

	if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
	    (obj_priv->gtt_offset & (obj->size - 1))) {
		WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
		     __func__, obj_priv->gtt_offset);
		return;
	}

	pitch_val = obj_priv->stride / 128;
	pitch_val = ffs(pitch_val) - 1;
	WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);

	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
	WARN_ON(fence_size_bits & ~0x00000f00);
	val |= fence_size_bits;
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}

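/* Find a fence register for the caller: prefer a free one, otherwise steal
 * the least recently used register whose object is not pinned, waiting for
 * any outstanding fenced access to that object first.
 */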
static int i915_find_fence_reg(struct drm_device *dev,
			       bool interruptible)
{
	struct drm_i915_fence_reg *reg = NULL;
	struct drm_i915_gem_object *obj_priv = NULL;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	int i, avail, ret;

	/* First try to find a free reg */
	avail = 0;
	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
		reg = &dev_priv->fence_regs[i];
		if (!reg->obj)
			return i;

		obj_priv = to_intel_bo(reg->obj);
		if (!obj_priv->pin_count)
		    avail++;
	}

	if (avail == 0)
		return -ENOSPC;

	/* None available, try to steal one or wait for a user to finish */
	i = I915_FENCE_REG_NONE;
	list_for_each_entry(reg, &dev_priv->mm.fence_list,
			    lru_list) {
		obj = reg->obj;
		obj_priv = to_intel_bo(obj);

		if (obj_priv->pin_count)
			continue;

		/* found one! */
		i = obj_priv->fence_reg;
		break;
	}

	BUG_ON(i == I915_FENCE_REG_NONE);

	/* We only have a reference on obj from the active list. put_fence_reg
	 * might drop that one, causing a use-after-free in it. So hold a
	 * private reference to obj like the other callers of put_fence_reg
	 * (set_tiling ioctl) do. */
	drm_gem_object_reference(obj);
	ret = i915_gem_object_put_fence_reg(obj, interruptible);
	drm_gem_object_unreference(obj);
	if (ret != 0)
		return ret;

	return i;
}

/**
 * i915_gem_object_get_fence_reg - set up a fence reg for an object
 * @obj: object to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 *
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 */
int
i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
			      bool interruptible)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_i915_fence_reg *reg = NULL;
	int ret;

	/* Just update our place in the LRU if our fence is getting used. */
	if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
		reg = &dev_priv->fence_regs[obj_priv->fence_reg];
		list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
		return 0;
	}

	switch (obj_priv->tiling_mode) {
	case I915_TILING_NONE:
		WARN(1, "allocating a fence for non-tiled object?\n");
		break;
	case I915_TILING_X:
		if (!obj_priv->stride)
			return -EINVAL;
		WARN((obj_priv->stride & (512 - 1)),
		     "object 0x%08x is X tiled but has non-512B pitch\n",
		     obj_priv->gtt_offset);
		break;
	case I915_TILING_Y:
		if (!obj_priv->stride)
			return -EINVAL;
		WARN((obj_priv->stride & (128 - 1)),
		     "object 0x%08x is Y tiled but has non-128B pitch\n",
		     obj_priv->gtt_offset);
		break;
	}

	ret = i915_find_fence_reg(dev, interruptible);
	if (ret < 0)
		return ret;

	obj_priv->fence_reg = ret;
	reg = &dev_priv->fence_regs[obj_priv->fence_reg];
	list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);

	reg->obj = obj;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		sandybridge_write_fence_reg(reg);
		break;
	case 5:
	case 4:
		i965_write_fence_reg(reg);
		break;
	case 3:
		i915_write_fence_reg(reg);
		break;
	case 2:
		i830_write_fence_reg(reg);
		break;
	}

	trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
			obj_priv->tiling_mode);

	return 0;
}

/**
 * i915_gem_clear_fence_reg - clear out fence register info
 * @obj: object to clear
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
static void
i915_gem_clear_fence_reg(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_i915_fence_reg *reg =
		&dev_priv->fence_regs[obj_priv->fence_reg];
	uint32_t fence_reg;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
			     (obj_priv->fence_reg * 8), 0);
		break;
	case 5:
	case 4:
		I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
		break;
	case 3:
		if (obj_priv->fence_reg >= 8)
			fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
		else
	case 2:
			fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;

		I915_WRITE(fence_reg, 0);
		break;
	}

	reg->obj = NULL;
	obj_priv->fence_reg = I915_FENCE_REG_NONE;
	list_del_init(&reg->lru_list);
}

/**
 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
 * to the buffer to finish, and then resets the fence register.
 * @obj: tiled object holding a fence register.
 * @interruptible: whether the wait upon the fence is interruptible
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
int
i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
			      bool interruptible)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_i915_fence_reg *reg;

	if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
		return 0;

	/* If we've changed tiling, GTT-mappings of the object
	 * need to re-fault to ensure that the correct fence register
	 * setup is in place.
	 */
	i915_gem_release_mmap(obj);

	/* On the i915, GPU access to tiled buffers is via a fence,
	 * therefore we must wait for any outstanding access to complete
	 * before clearing the fence.
	 */
	reg = &dev_priv->fence_regs[obj_priv->fence_reg];
	if (reg->gpu) {
		int ret;

		ret = i915_gem_object_flush_gpu_write_domain(obj, true);
		if (ret)
			return ret;

		ret = i915_gem_object_wait_rendering(obj, interruptible);
		if (ret)
			return ret;

		reg->gpu = false;
	}

	i915_gem_object_flush_gtt_write_domain(obj);
	i915_gem_clear_fence_reg(obj);

	return 0;
}

/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_mm_node *free_space;
	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
	int ret;

	if (obj_priv->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to bind a purgeable object\n");
		return -EINVAL;
	}

	if (alignment == 0)
		alignment = i915_gem_get_gtt_alignment(obj);
	if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

	/* If the object is bigger than the entire aperture, reject it early
	 * before evicting everything in a vain attempt to find space.
	 */
	if (obj->size > dev_priv->mm.gtt_total) {
		DRM_ERROR("Attempting to bind an object larger than the aperture\n");
		return -E2BIG;
	}

 search_free:
	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
					obj->size, alignment, 0);
	if (free_space != NULL)
		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
						       alignment);
	if (obj_priv->gtt_space == NULL) {
		/* If the gtt is empty and we're still having trouble
		 * fitting our object in, we're out of memory.
		 */
		ret = i915_gem_evict_something(dev, obj->size, alignment);
		if (ret)
			return ret;

		goto search_free;
	}

	ret = i915_gem_object_get_pages(obj, gfpmask);
	if (ret) {
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;

		if (ret == -ENOMEM) {
			/* first try to clear up some space from the GTT */
			ret = i915_gem_evict_something(dev, obj->size,
						       alignment);
			if (ret) {
				/* now try to shrink everyone else */
				if (gfpmask) {
					gfpmask = 0;
					goto search_free;
				}

				return ret;
			}

			goto search_free;
		}

		return ret;
	}

	/* Create an AGP memory structure pointing at our pages, and bind it
	 * into the GTT.
	 */
	obj_priv->agp_mem = drm_agp_bind_pages(dev,
					       obj_priv->pages,
					       obj->size >> PAGE_SHIFT,
					       obj_priv->gtt_space->start,
					       obj_priv->agp_type);
	if (obj_priv->agp_mem == NULL) {
		i915_gem_object_put_pages(obj);
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;

		ret = i915_gem_evict_something(dev, obj->size, alignment);
		if (ret)
			return ret;

		goto search_free;
	}

	/* keep track of the bound object by adding it to the inactive list */
	list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
	i915_gem_info_add_gtt(dev_priv, obj->size);

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

	obj_priv->gtt_offset = obj_priv->gtt_space->start;
	trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);

	return 0;
}

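/* Flush the CPU caches for the object's backing pages, if any are pinned. */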
void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object	*obj_priv = to_intel_bo(obj);

	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj_priv->pages == NULL)
		return;

	trace_i915_gem_object_clflush(obj);

	drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
}

/** Flushes any GPU write domain for the object if it's dirty. */
static int
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
				       bool pipelined)
{
	struct drm_device *dev = obj->dev;
	uint32_t old_write_domain;

	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
		return 0;

	/* Queue the GPU write cache flushing we need. */
	old_write_domain = obj->write_domain;
	i915_gem_flush_ring(dev, NULL,
			    to_intel_bo(obj)->ring,
			    0, obj->write_domain);
	BUG_ON(obj->write_domain);

	trace_i915_gem_object_change_domain(obj,
					    obj->read_domains,
					    old_write_domain);

	if (pipelined)
		return 0;

	return i915_gem_object_wait_rendering(obj, true);
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain.   Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush.  It also doesn't land in render cache.
	 */
	old_write_domain = obj->write_domain;
	obj->write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->read_domains,
					    old_write_domain);
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	uint32_t old_write_domain;

	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
		return;

	i915_gem_clflush_object(obj);
	drm_agp_chipset_flush(dev);
	old_write_domain = obj->write_domain;
	obj->write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->read_domains,
					    old_write_domain);
}

/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	uint32_t old_write_domain, old_read_domains;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (obj_priv->gtt_space == NULL)
		return -EINVAL;

	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
	if (ret != 0)
		return ret;

	i915_gem_object_flush_cpu_write_domain(obj);

	if (write) {
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;
	}

	old_write_domain = obj->write_domain;
	old_read_domains = obj->read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->read_domains = I915_GEM_DOMAIN_GTT;
		obj->write_domain = I915_GEM_DOMAIN_GTT;
		obj_priv->dirty = 1;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}

/*
 * Prepare buffer for display plane. Use uninterruptible for a possible flush
 * wait, as during the modesetting process we're not supposed to be interrupted.
 */
int
i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
				     bool pipelined)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	uint32_t old_read_domains;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (obj_priv->gtt_space == NULL)
		return -EINVAL;

	ret = i915_gem_object_flush_gpu_write_domain(obj, true);
	if (ret)
		return ret;

	/* Currently, we are always called from a non-interruptible context. */
	if (!pipelined) {
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;
	}

	i915_gem_object_flush_cpu_write_domain(obj);

	old_read_domains = obj->read_domains;
	obj->read_domains |= I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    obj->write_domain);

	return 0;
}

/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
	if (ret != 0)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we have a partially-valid cache of the object in the CPU,
	 * finish invalidating it and free the per-page flags.
	 */
	i915_gem_object_set_to_full_cpu_read_domain(obj);

	if (write) {
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;
	}

	old_write_domain = obj->write_domain;
	old_read_domains = obj->read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj);

		obj->read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->read_domains = I915_GEM_DOMAIN_CPU;
		obj->write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}

/*
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invalidating though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Mapped to GTT
 *	4. Read by GPU
 *	5. Unmapped from GTT
 *	6. Freed
 *
 *	Let's take these a step at a time
 *
 *	1. Allocated
 *		Pages allocated from the kernel may still have
 *		cache contents, so we set them to (CPU, CPU) always.
 *	2. Written by CPU (using pwrite)
 *		The pwrite function calls set_domain (CPU, CPU) and
 *		this function does nothing (as nothing changes)
 *	3. Mapped by GTT
 *		This function asserts that the object is not
 *		currently in any GPU-based read or write domains
 *	4. Read by GPU
 *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *		As write_domain is zero, this function adds in the
 *		current read domains (CPU+COMMAND, 0).
 *		flush_domains is set to CPU.
 *		invalidate_domains is set to COMMAND
 *		clflush is run to get data out of the CPU caches
 *		then i915_dev_set_domain calls i915_gem_flush to
 *		emit an MI_FLUSH and drm_agp_chipset_flush
 *	5. Unmapped from GTT
 *		i915_gem_object_unbind calls set_domain (CPU, CPU)
 *		flush_domains and invalidate_domains end up both zero
 *		so no flushing/invalidating happens
 *	6. Freed
 *		yay, done
 *
 * Case 2: The shared render buffer
 *
 *	1. Allocated
 *	2. Mapped to GTT
 *	3. Read/written by GPU
 *	4. set_domain to (CPU,CPU)
 *	5. Read/written by CPU
 *	6. Read/written by GPU
 *
 *	1. Allocated
 *		Same as last example, (CPU, CPU)
 *	2. Mapped to GTT
 *		Nothing changes (assertions find that it is not in the GPU)
 *	3. Read/written by GPU
 *		execbuffer calls set_domain (RENDER, RENDER)
 *		flush_domains gets CPU
 *		invalidate_domains gets GPU
 *		clflush (obj)
 *		MI_FLUSH and drm_agp_chipset_flush
 *	4. set_domain (CPU, CPU)
 *		flush_domains gets GPU
 *		invalidate_domains gets CPU
 *		wait_rendering (obj) to make sure all drawing is complete.
 *		This will include an MI_FLUSH to get the data from GPU
 *		to memory
 *		clflush (obj) to invalidate the CPU cache
 *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *	5. Read/written by CPU
 *		cache lines are loaded and dirtied
 *	6. Read written by GPU
 *		Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Read by GPU
 *	4. Updated (written) by CPU again
 *	5. Read by GPU
 *
 *	1. Allocated
 *		(CPU, CPU)
 *	2. Written by CPU
 *		(CPU, CPU)
 *	3. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 *	4. Updated (written) by CPU again
 *		(CPU, CPU)
 *		flush_domains = 0 (no previous write domain)
 *		invalidate_domains = 0 (no new read domains)
 *	5. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 */
static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
				  struct intel_ring_buffer *ring)
{
	struct drm_device		*dev = obj->dev;
	struct drm_i915_private		*dev_priv = dev->dev_private;
	struct drm_i915_gem_object	*obj_priv = to_intel_bo(obj);
	uint32_t			invalidate_domains = 0;
	uint32_t			flush_domains = 0;
	uint32_t			old_read_domains;

	intel_mark_busy(dev, obj);

	/*
	 * If the object isn't moving to a new write domain,
	 * let the object stay in multiple read domains
	 */
	if (obj->pending_write_domain == 0)
		obj->pending_read_domains |= obj->read_domains;
	else
		obj_priv->dirty = 1;

	/*
	 * Flush the current write domain if
	 * the new read domains don't match. Invalidate
	 * any read domains which differ from the old
	 * write domain
	 */
	if (obj->write_domain &&
	    obj->write_domain != obj->pending_read_domains) {
		flush_domains |= obj->write_domain;
		invalidate_domains |=
			obj->pending_read_domains & ~obj->write_domain;
	}
	/*
	 * Invalidate any read caches which may have
	 * stale data. That is, any new read domains.
	 */
	invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
		i915_gem_clflush_object(obj);

	old_read_domains = obj->read_domains;

	/* The actual obj->write_domain will be updated with
	 * pending_write_domain after we emit the accumulated flush for all
	 * of our domain changes in execbuffers (which clears objects'
	 * write_domains).  So if we have a current write domain that we
	 * aren't changing, set pending_write_domain to that.
	 */
	if (flush_domains == 0 && obj->pending_write_domain == 0)
		obj->pending_write_domain = obj->write_domain;
	obj->read_domains = obj->pending_read_domains;

	dev->invalidate_domains |= invalidate_domains;
	dev->flush_domains |= flush_domains;
	if (flush_domains & I915_GEM_GPU_DOMAINS)
		dev_priv->mm.flush_rings |= obj_priv->ring->id;
	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
		dev_priv->mm.flush_rings |= ring->id;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    obj->write_domain);
}

/**
 * Moves the object from a partially valid CPU read domain to a fully
 * valid one.
 *
 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
 */
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	if (!obj_priv->page_cpu_valid)
		return;

	/* If we're partially in the CPU read domain, finish moving it in.
	 */
	if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
		int i;

		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
			if (obj_priv->page_cpu_valid[i])
				continue;
			drm_clflush_pages(obj_priv->pages + i, 1);
		}
	}

	/* Free the page_cpu_valid mappings which are now stale, whether
	 * or not we've got I915_GEM_DOMAIN_CPU.
	 */
	kfree(obj_priv->page_cpu_valid);
	obj_priv->page_cpu_valid = NULL;
}

/**
 * Set the CPU read domain on a range of the object.
 *
 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
 * not entirely valid.  The page_cpu_valid member of the object flags which
 * pages have been flushed, and will be respected by
 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
 * of the whole object.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
					  uint64_t offset, uint64_t size)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	uint32_t old_read_domains;
	int i, ret;

	if (offset == 0 && size == obj->size)
		return i915_gem_object_set_to_cpu_domain(obj, 0);

	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
	if (ret != 0)
		return ret;
	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're already fully in the CPU read domain, we're done. */
	if (obj_priv->page_cpu_valid == NULL &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
		return 0;

	/* Otherwise, create/clear the per-page CPU read domain flag if we're
	 * newly adding I915_GEM_DOMAIN_CPU
	 */
	if (obj_priv->page_cpu_valid == NULL) {
		obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
						   GFP_KERNEL);
		if (obj_priv->page_cpu_valid == NULL)
			return -ENOMEM;
	} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
		memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);

	/* Flush the cache on any pages that are still invalid from the CPU's
	 * perspective.
	 */
	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
	     i++) {
		if (obj_priv->page_cpu_valid[i])
			continue;

		drm_clflush_pages(obj_priv->pages + i, 1);

		obj_priv->page_cpu_valid[i] = 1;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	old_read_domains = obj->read_domains;
	obj->read_domains |= I915_GEM_DOMAIN_CPU;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    obj->write_domain);

	return 0;
}

/**
 * Pin an object to the GTT and evaluate the relocations landing in it.
 */
static int
i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
			     struct drm_file *file_priv,
			     struct drm_i915_gem_exec_object2 *entry)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_gem_object *target_obj = NULL;
	uint32_t target_handle = 0;
	int i, ret = 0;

	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
	for (i = 0; i < entry->relocation_count; i++) {
		struct drm_i915_gem_relocation_entry reloc;
		uint32_t target_offset;

		if (__copy_from_user_inatomic(&reloc,
					      user_relocs+i,
					      sizeof(reloc))) {
			ret = -EFAULT;
			break;
		}

		if (reloc.target_handle != target_handle) {
			drm_gem_object_unreference(target_obj);

			target_obj = drm_gem_object_lookup(dev, file_priv,
							   reloc.target_handle);
			if (target_obj == NULL) {
				ret = -ENOENT;
				break;
			}

			target_handle = reloc.target_handle;
		}
		target_offset = to_intel_bo(target_obj)->gtt_offset;

#if WATCH_RELOC
		DRM_INFO("%s: obj %p offset %08x target %d "
			 "read %08x write %08x gtt %08x "
			 "presumed %08x delta %08x\n",
			 __func__,
			 obj,
			 (int) reloc.offset,
			 (int) reloc.target_handle,
			 (int) reloc.read_domains,
			 (int) reloc.write_domain,
			 (int) target_offset,
			 (int) reloc.presumed_offset,
			 reloc.delta);
#endif

		/* The target buffer should have appeared before us in the
		 * exec_object list, so it should have a GTT space bound by now.
		 */
		if (target_offset == 0) {
			DRM_ERROR("No GTT space found for object %d\n",
				  reloc.target_handle);
			ret = -EINVAL;
			break;
		}

		/* Validate that the target is in a valid r/w GPU domain */
		if (reloc.write_domain & (reloc.write_domain - 1)) {
			DRM_ERROR("reloc with multiple write domains: "
				  "obj %p target %d offset %d "
				  "read %08x write %08x",
				  obj, reloc.target_handle,
				  (int) reloc.offset,
				  reloc.read_domains,
				  reloc.write_domain);
			ret = -EINVAL;
			break;
		}
		if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
		    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
			DRM_ERROR("reloc with read/write CPU domains: "
				  "obj %p target %d offset %d "
				  "read %08x write %08x",
				  obj, reloc.target_handle,
				  (int) reloc.offset,
				  reloc.read_domains,
				  reloc.write_domain);
			ret = -EINVAL;
			break;
		}
		if (reloc.write_domain && target_obj->pending_write_domain &&
		    reloc.write_domain != target_obj->pending_write_domain) {
			DRM_ERROR("Write domain conflict: "
				  "obj %p target %d offset %d "
				  "new %08x old %08x\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset,
				  reloc.write_domain,
				  target_obj->pending_write_domain);
			ret = -EINVAL;
			break;
		}

		target_obj->pending_read_domains |= reloc.read_domains;
		target_obj->pending_write_domain |= reloc.write_domain;

		/* If the relocation already has the right value in it, no
		 * more work needs to be done.
		 */
		if (target_offset == reloc.presumed_offset)
			continue;

		/* Check that the relocation address is valid... */
		if (reloc.offset > obj->base.size - 4) {
			DRM_ERROR("Relocation beyond object bounds: "
				  "obj %p target %d offset %d size %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset, (int) obj->base.size);
			ret = -EINVAL;
			break;
		}
		if (reloc.offset & 3) {
			DRM_ERROR("Relocation not 4-byte aligned: "
				  "obj %p target %d offset %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.offset);
			ret = -EINVAL;
			break;
		}

		/* and points to somewhere within the target object. */
		if (reloc.delta >= target_obj->size) {
			DRM_ERROR("Relocation beyond target object bounds: "
				  "obj %p target %d delta %d size %d.\n",
				  obj, reloc.target_handle,
				  (int) reloc.delta, (int) target_obj->size);
			ret = -EINVAL;
			break;
		}

		reloc.delta += target_offset;
		if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
			uint32_t page_offset = reloc.offset & ~PAGE_MASK;
			char *vaddr;

			vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]);
			*(uint32_t *)(vaddr + page_offset) = reloc.delta;
			kunmap_atomic(vaddr);
		} else {
			uint32_t __iomem *reloc_entry;
			void __iomem *reloc_page;

			ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
			if (ret)
				break;

			/* Map the page containing the relocation we're going to perform.  */
			reloc.offset += obj->gtt_offset;
			reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
							      reloc.offset & PAGE_MASK);
			reloc_entry = (uint32_t __iomem *)
				(reloc_page + (reloc.offset & ~PAGE_MASK));
			iowrite32(reloc.delta, reloc_entry);
			io_mapping_unmap_atomic(reloc_page);
		}

		/* and update the user's relocation entry */
		reloc.presumed_offset = target_offset;
		if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
					      &reloc.presumed_offset,
					      sizeof(reloc.presumed_offset))) {
		    ret = -EFAULT;
		    break;
		}
	}

	drm_gem_object_unreference(target_obj);
	return ret;
}

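/* Pin every object of the execbuffer into the GTT, allocating fence registers
 * where needed, and retry once after evicting everything if the aperture is
 * full.
 */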
static int
i915_gem_execbuffer_pin(struct drm_device *dev,
			struct drm_file *file,
			struct drm_gem_object **object_list,
			struct drm_i915_gem_exec_object2 *exec_list,
			int count)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret, i, retry;

	/* attempt to pin all of the buffers into the GTT */
	for (retry = 0; retry < 2; retry++) {
		ret = 0;
		for (i = 0; i < count; i++) {
			struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
			struct drm_i915_gem_object *obj= to_intel_bo(object_list[i]);
			bool need_fence =
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;

			/* Check fence reg constraints and rebind if necessary */
			if (need_fence &&
			    !i915_gem_object_fence_offset_ok(&obj->base,
							     obj->tiling_mode)) {
				ret = i915_gem_object_unbind(&obj->base);
				if (ret)
					break;
			}

			ret = i915_gem_object_pin(&obj->base, entry->alignment);
			if (ret)
				break;

			/*
			 * Pre-965 chips need a fence register set up in order
			 * to properly handle blits to/from tiled surfaces.
			 */
			if (need_fence) {
				ret = i915_gem_object_get_fence_reg(&obj->base, true);
				if (ret) {
					i915_gem_object_unpin(&obj->base);
					break;
				}

				dev_priv->fence_regs[obj->fence_reg].gpu = true;
			}

			entry->offset = obj->gtt_offset;
		}

		while (i--)
			i915_gem_object_unpin(object_list[i]);

		if (ret == 0)
			break;

		if (ret != -ENOSPC || retry)
			return ret;

		ret = i915_gem_evict_everything(dev);
		if (ret)
			return ret;
	}

	return 0;
}

3498 3499 3500
/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
3501 3502 3503 3504
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
3505 3506 3507
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
3508
static int
3509
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3510
{
3511 3512
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = file->driver_priv;
3513
	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3514 3515 3516 3517
	struct drm_i915_gem_request *request;
	struct intel_ring_buffer *ring = NULL;
	u32 seqno = 0;
	int ret;
3518

3519
	spin_lock(&file_priv->mm.lock);
3520
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3521 3522
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;
3523

3524 3525
		ring = request->ring;
		seqno = request->seqno;
3526
	}
3527
	spin_unlock(&file_priv->mm.lock);
3528

3529 3530
	if (seqno == 0)
		return 0;
3531

3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543
	ret = 0;
	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
		/* And wait for the seqno passing without holding any locks and
		 * causing extra latency for others. This is safe as the irq
		 * generation is designed to be run atomically and so is
		 * lockless.
		 */
		ring->user_irq_get(dev, ring);
		ret = wait_event_interruptible(ring->irq_queue,
					       i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
					       || atomic_read(&dev_priv->mm.wedged));
		ring->user_irq_put(dev, ring);
3544

3545 3546
		if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
			ret = -EIO;
3547 3548
	}

3549 3550
	if (ret == 0)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);

	return ret;
}

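/* Sanity check the batch buffer: it must not start at offset zero and both
 * its start offset and length must be 8-byte aligned.
 */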
static int
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
			  uint64_t exec_offset)
{
	uint32_t exec_start, exec_len;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	if ((exec_start | exec_len) & 0x7)
		return -EINVAL;

	if (!exec_start)
		return -EINVAL;

	return 0;
}

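/* Verify up front that each relocation list supplied by userspace points at
 * memory we may read and write back (for presumed offsets), and prefault it.
 */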
static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;
	for (i = 0; i < count; i++) {
		char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
		size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry);
		if (!access_ok(VERIFY_READ, ptr, length))
			return -EFAULT;
		/* we may also need to update the presumed offsets */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (fault_in_pages_readable(ptr, length))
			return -EFAULT;
	}

	return 0;
}

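/* Common execbuffer path for the legacy and execbuffer2 ioctls: look up and
 * pin the objects, apply relocations, flush the required domains, dispatch
 * the batch on the selected ring and queue a request to track completion.
 */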
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec_list)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object **object_list = NULL;
	struct drm_gem_object *batch_obj;
	struct drm_i915_gem_object *obj_priv;
	struct drm_clip_rect *cliprects = NULL;
	struct drm_i915_gem_request *request = NULL;
	int ret, i, flips;
	uint64_t exec_offset;

	struct intel_ring_buffer *ring = NULL;

	ret = i915_gem_check_is_wedged(dev);
	if (ret)
		return ret;

	ret = validate_exec_list(exec_list, args->buffer_count);
	if (ret)
		return ret;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif
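	/* Select the ring that will execute this batch: render, BSD or BLT. */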
	switch (args->flags & I915_EXEC_RING_MASK) {
	case I915_EXEC_DEFAULT:
	case I915_EXEC_RENDER:
		ring = &dev_priv->render_ring;
		break;
	case I915_EXEC_BSD:
		if (!HAS_BSD(dev)) {
			DRM_ERROR("execbuf with invalid ring (BSD)\n");
			return -EINVAL;
		}
		ring = &dev_priv->bsd_ring;
		break;
	case I915_EXEC_BLT:
		if (!HAS_BLT(dev)) {
			DRM_ERROR("execbuf with invalid ring (BLT)\n");
			return -EINVAL;
		}
		ring = &dev_priv->blt_ring;
		break;
	default:
		DRM_ERROR("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}
	object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
	if (object_list == NULL) {
		DRM_ERROR("Failed to allocate object list for %d buffers\n",
			  args->buffer_count);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	if (args->num_cliprects != 0) {
		cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		ret = copy_from_user(cliprects,
				     (struct drm_clip_rect __user *)
				     (uintptr_t) args->cliprects_ptr,
				     sizeof(*cliprects) * args->num_cliprects);
		if (ret != 0) {
			DRM_ERROR("copy %d cliprects failed: %d\n",
				  args->num_cliprects, ret);
			ret = -EFAULT;
			goto pre_mutex_err;
		}
	}

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (request == NULL) {
		ret = -ENOMEM;
		goto pre_mutex_err;
	}
	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->mm.suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	for (i = 0; i < args->buffer_count; i++) {
		object_list[i] = drm_gem_object_lookup(dev, file,
						       exec_list[i].handle);
		if (object_list[i] == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				   exec_list[i].handle, i);
			/* prevent error path from reading uninitialized data */
			args->buffer_count = i + 1;
			ret = -ENOENT;
			goto err;
		}
		obj_priv = to_intel_bo(object_list[i]);
		if (obj_priv->in_execbuffer) {
			DRM_ERROR("Object %p appears more than once in object list\n",
				   object_list[i]);
			/* prevent error path from reading uninitialized data */
			args->buffer_count = i + 1;
			ret = -EINVAL;
			goto err;
		}
		obj_priv->in_execbuffer = true;
	}
	/* Move the objects en-masse into the GTT, evicting if necessary. */
	ret = i915_gem_execbuffer_pin(dev, file,
				      object_list, exec_list,
				      args->buffer_count);
	if (ret)
		goto err;
	/* The objects are in their final locations, apply the relocations. */
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
		obj->base.pending_read_domains = 0;
		obj->base.pending_write_domain = 0;
		ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	batch_obj = object_list[args->buffer_count-1];
	if (batch_obj->pending_write_domain) {
		DRM_ERROR("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
	/* Sanity check the batch buffer */
	exec_offset = to_intel_bo(batch_obj)->gtt_offset;
	ret = i915_gem_check_execbuffer(args, exec_offset);
	if (ret != 0) {
		DRM_ERROR("execbuf with invalid offset/length\n");
		goto err;
	}

	/* Zero the global flush/invalidate flags. These
	 * will be modified as new domains are computed
	 * for each object
	 */
	dev->invalidate_domains = 0;
	dev->flush_domains = 0;
	dev_priv->mm.flush_rings = 0;
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		/* Compute new gpu domains and update invalidate/flush */
		i915_gem_object_set_to_gpu_domain(obj, ring);
	}

	if (dev->invalidate_domains | dev->flush_domains) {
#if WATCH_EXEC
		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
			  __func__,
			 dev->invalidate_domains,
			 dev->flush_domains);
#endif
		i915_gem_flush(dev, file,
			       dev->invalidate_domains,
			       dev->flush_domains,
			       dev_priv->mm.flush_rings);
	}
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];
		uint32_t old_write_domain = obj->write_domain;
		obj->write_domain = obj->pending_write_domain;
		trace_i915_gem_object_change_domain(obj,
						    obj->read_domains,
						    old_write_domain);
	}

#if WATCH_COHERENCY
	for (i = 0; i < args->buffer_count; i++) {
		i915_gem_object_check_coherency(object_list[i],
						exec_list[i].handle);
	}
#endif

#if WATCH_EXEC
	i915_gem_dump_object(batch_obj,
			      args->batch_len,
			      __func__,
			      ~0);
#endif

	/* Check for any pending flips. As we only maintain a flip queue depth
	 * of 1, we can simply insert a WAIT for the next display flip prior
	 * to executing the batch and avoid stalling the CPU.
	 */
	flips = 0;
	for (i = 0; i < args->buffer_count; i++) {
		if (object_list[i]->write_domain)
			flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip);
	}
	if (flips) {
		int plane, flip_mask;

		for (plane = 0; flips >> plane; plane++) {
			if (((flips >> plane) & 1) == 0)
				continue;

			if (plane)
				flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
			else
				flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;

			intel_ring_begin(dev, ring, 2);
			intel_ring_emit(dev, ring,
					MI_WAIT_FOR_EVENT | flip_mask);
			intel_ring_emit(dev, ring, MI_NOOP);
			intel_ring_advance(dev, ring);
		}
	}

	/* Exec the batchbuffer */
	ret = ring->dispatch_gem_execbuffer(dev, ring, args,
					    cliprects, exec_offset);
	if (ret) {
		DRM_ERROR("dispatch failed %d\n", ret);
		goto err;
	}

	/*
	 * Ensure that the commands in the batch buffer are
	 * finished before the interrupt fires
	 */
	i915_retire_commands(dev, ring);

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		i915_gem_object_move_to_active(obj, ring);
		if (obj->write_domain)
			list_move_tail(&to_intel_bo(obj)->gpu_write_list,
				       &ring->gpu_write_list);
	}

	i915_add_request(dev, file, request, ring);
	request = NULL;

err:
	for (i = 0; i < args->buffer_count; i++) {
		if (object_list[i]) {
			obj_priv = to_intel_bo(object_list[i]);
			obj_priv->in_execbuffer = false;
		}
		drm_gem_object_unreference(object_list[i]);
	}

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	drm_free_large(object_list);
	kfree(cliprects);
	kfree(request);

	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;

	ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_ERROR("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec2_list == NULL) {
		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_ERROR("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}

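/* Pin an object into the GTT: bind it (rebinding if the current offset does
 * not satisfy the requested alignment) and take a pin reference. The first
 * pin moves an inactive object onto the pinned list.
 */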
int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int ret;

	BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
	WARN_ON(i915_verify_lists(dev));

	if (obj_priv->gtt_space != NULL) {
		if (alignment == 0)
			alignment = i915_gem_get_gtt_alignment(obj);
		if (obj_priv->gtt_offset & (alignment - 1)) {
			WARN(obj_priv->pin_count,
			     "bo is already pinned with incorrect alignment:"
			     " offset=%x, req.alignment=%x\n",
			     obj_priv->gtt_offset, alignment);
			ret = i915_gem_object_unbind(obj);
			if (ret)
				return ret;
		}
	}

	if (obj_priv->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment);
		if (ret)
			return ret;
	}
	obj_priv->pin_count++;

	/* If the object is not active and not pending a flush,
	 * remove it from the inactive list
	 */
	if (obj_priv->pin_count == 1) {
		i915_gem_info_add_pin(dev_priv, obj->size);
		if (!obj_priv->active)
			list_move_tail(&obj_priv->mm_list,
				       &dev_priv->mm.pinned_list);
	}

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

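/* Drop a pin reference; when the last one is released an inactive object is
 * returned to the inactive list and the pinned accounting is updated.
 */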
void
i915_gem_object_unpin(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	WARN_ON(i915_verify_lists(dev));
	obj_priv->pin_count--;
	BUG_ON(obj_priv->pin_count < 0);
	BUG_ON(obj_priv->gtt_space == NULL);

	/* If the object is no longer pinned, and is
	 * neither active nor being flushed, then stick it on
	 * the inactive list
	 */
	if (obj_priv->pin_count == 0) {
		if (!obj_priv->active)
			list_move_tail(&obj_priv->mm_list,
				       &dev_priv->mm.inactive_list);
		i915_gem_info_remove_pin(dev_priv, obj->size);
	}
	WARN_ON(i915_verify_lists(dev));
}

int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	obj_priv = to_intel_bo(obj);
	if (obj_priv->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to pin a purgeable buffer\n");
		ret = -EINVAL;
		goto out;
	}

	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}

	obj_priv->user_pin_count++;
	obj_priv->pin_filp = file_priv;
	if (obj_priv->user_pin_count == 1) {
		ret = i915_gem_object_pin(obj, args->alignment);
		if (ret)
			goto out;
	}

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	i915_gem_object_flush_cpu_write_domain(obj);
	args->offset = obj_priv->gtt_offset;
out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;
	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	obj_priv = to_intel_bo(obj);
	if (obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}
	obj_priv->user_pin_count--;
	if (obj_priv->user_pin_count == 0) {
		obj_priv->pin_filp = NULL;
		i915_gem_object_unpin(obj);
	}
out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

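/* Report whether an object is still in use by the GPU, flushing and retiring
 * along the way so userspace sees it become idle without further ioctls.
 */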
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	obj_priv = to_intel_bo(obj);
	/* Count all active objects as busy, even if they are currently not used
	 * by the gpu. Users of this interface expect objects to eventually
	 * become non-busy without any further actions, therefore emit any
	 * necessary flushes here.
	 */
	args->busy = obj_priv->active;
	if (args->busy) {
		/* Unconditionally flush objects, even when the gpu still uses this
		 * object. Userspace calling this function indicates that it wants to
		 * use this buffer rather sooner than later, so issuing the required
		 * flush earlier is beneficial.
		 */
		if (obj->write_domain & I915_GEM_GPU_DOMAINS)
			i915_gem_flush_ring(dev, file_priv,
					    obj_priv->ring,
					    0, obj->write_domain);

		/* Update the active list for the hardware's current position.
		 * Otherwise this only updates on a delayed timer or when irqs
		 * are actually unmasked, and our working set ends up being
		 * larger than required.
		 */
		i915_gem_retire_requests_ring(dev, obj_priv->ring);

		args->busy = obj_priv->active;
	}

	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
    return i915_gem_ring_throttle(dev, file_priv);
}

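/* I915_MADV_DONTNEED marks an unpinned object's backing storage as
 * discardable (truncating it at once if the object is already unbound);
 * I915_MADV_WILLNEED reverses that unless the pages have been purged.
 */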
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_gem_madvise *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
	    break;
	default:
	    return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	obj_priv = to_intel_bo(obj);

	if (obj_priv->pin_count) {
		ret = -EINVAL;
		goto out;
	}

	if (obj_priv->madv != __I915_MADV_PURGED)
		obj_priv->madv = args->madv;
	/* if the object is no longer bound, discard its backing storage */
	if (i915_gem_object_is_purgeable(obj_priv) &&
	    obj_priv->gtt_space == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj_priv->madv != __I915_MADV_PURGED;

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

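/* Allocate a GEM object of the requested size, starting life in the CPU
 * domain with no fence register and the default (willneed) madvise state.
 */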
struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
					      size_t size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;
	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}
	i915_gem_info_add_obj(dev_priv, size);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->agp_type = AGP_USER_MEMORY;
	obj->base.driver_private = NULL;
	obj->fence_reg = I915_FENCE_REG_NONE;
	INIT_LIST_HEAD(&obj->mm_list);
	INIT_LIST_HEAD(&obj->ring_list);
	INIT_LIST_HEAD(&obj->gpu_write_list);
	obj->madv = I915_MADV_WILLNEED;
	return &obj->base;
}

int i915_gem_init_object(struct drm_gem_object *obj)
{
	BUG();
	return 0;
}

static void i915_gem_free_object_tail(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int ret;
	ret = i915_gem_object_unbind(obj);
	if (ret == -ERESTARTSYS) {
		list_move(&obj_priv->mm_list,
			  &dev_priv->mm.deferred_free_list);
		return;
	}
	if (obj_priv->mmap_offset)
		i915_gem_free_mmap_offset(obj);
	drm_gem_object_release(obj);
	i915_gem_info_remove_obj(dev_priv, obj->size);
	kfree(obj_priv->page_cpu_valid);
	kfree(obj_priv->bit_17);
	kfree(obj_priv);
}

void i915_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);

	trace_i915_gem_object_destroy(obj);

	while (obj_priv->pin_count > 0)
		i915_gem_object_unpin(obj);

	if (obj_priv->phys_obj)
		i915_gem_detach_phys_object(dev, obj);

	i915_gem_free_object_tail(obj);
}

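/* Quiesce the GPU: wait for outstanding rendering, evict under UMS, mark the
 * device suspended and tear down the rings.
 */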
int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	mutex_lock(&dev->struct_mutex);
	if (dev_priv->mm.suspended) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = i915_gpu_idle(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	/* Under UMS, be paranoid and evict. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_gem_evict_inactive(dev);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 * And not confound mm.suspended!
	 */
	dev_priv->mm.suspended = 1;
	del_timer_sync(&dev_priv->hangcheck_timer);

	i915_kernel_lost_context(dev);
	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);

	/* Cancel the retire work handler, which should be idle now. */
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);

	return 0;
}

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
static int
i915_gem_init_pipe_control(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj_priv = to_intel_bo(obj);
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret)
		goto err_unref;

	dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
	dev_priv->seqno_page = kmap(obj_priv->pages[0]);
	if (dev_priv->seqno_page == NULL)
		goto err_unpin;

	dev_priv->seqno_obj = obj;
	memset(dev_priv->seqno_page, 0, PAGE_SIZE);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(obj);
err:
	return ret;
}

static void
i915_gem_cleanup_pipe_control(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = dev_priv->seqno_obj;
	obj_priv = to_intel_bo(obj);
	kunmap(obj_priv->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(obj);
	dev_priv->seqno_obj = NULL;

	dev_priv->seqno_page = NULL;
}

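/* Bring up the command rings: the pipe control page on 965+, the render
 * ring, and the BSD/BLT rings where the hardware provides them.
 */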
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	if (HAS_PIPE_CONTROL(dev)) {
		ret = i915_gem_init_pipe_control(dev);
		if (ret)
			return ret;
	}
	ret = intel_init_render_ring_buffer(dev);
	if (ret)
		goto cleanup_pipe_control;

	if (HAS_BSD(dev)) {
		ret = intel_init_bsd_ring_buffer(dev);
		if (ret)
			goto cleanup_render_ring;
	}
	if (HAS_BLT(dev)) {
		ret = intel_init_blt_ring_buffer(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	dev_priv->next_seqno = 1;

	return 0;

cleanup_bsd_ring:
	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
cleanup_render_ring:
	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
cleanup_pipe_control:
	if (HAS_PIPE_CONTROL(dev))
		i915_gem_cleanup_pipe_control(dev);
	return ret;
}

void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
	intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
	if (HAS_PIPE_CONTROL(dev))
		i915_gem_cleanup_pipe_control(dev);
}

int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		atomic_set(&dev_priv->mm.wedged, 0);
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->mm.suspended = 0;

	ret = i915_gem_init_ringbuffer(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
	BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
	BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
	BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list));
	BUG_ON(!list_empty(&dev_priv->blt_ring.request_list));
	mutex_unlock(&dev->struct_mutex);
	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_ringbuffer;
	return 0;

cleanup_ringbuffer:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	dev_priv->mm.suspended = 1;
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	drm_irq_uninstall(dev);
	return i915_gem_idle(dev);
}

void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}

static void
init_ring_lists(struct intel_ring_buffer *ring)
{
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);
}

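/* One-time GEM setup at driver load: initialise the memory-management lists,
 * the retire work handler, the shrinker registration and the fence registers.
 */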
void
i915_gem_load(struct drm_device *dev)
{
	int i;
	drm_i915_private_t *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
	init_ring_lists(&dev_priv->render_ring);
	init_ring_lists(&dev_priv->bsd_ring);
	init_ring_lists(&dev_priv->blt_ring);
	for (i = 0; i < 16; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	init_completion(&dev_priv->error_completion);
	spin_lock(&shrink_list_lock);
	list_add(&dev_priv->mm.shrink_list, &shrink_list);
	spin_unlock(&shrink_list_lock);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	if (IS_GEN3(dev)) {
		u32 tmp = I915_READ(MI_ARB_STATE);
		if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
			/* arb state is a masked write, so set bit + bit in mask */
			tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
			I915_WRITE(MI_ARB_STATE, tmp);
		}
	}

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;
	if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	switch (INTEL_INFO(dev)->gen) {
	case 6:
		for (i = 0; i < 16; i++)
			I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
	case 2:
		for (i = 0; i < 8; i++)
			I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
		break;
	}
	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);
}

/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
static int i915_gem_init_phys_object(struct drm_device *dev,
				     int id, int size, int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, align);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	kfree(phys_obj);
	return ret;
}

static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj) {
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
	}

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}

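/* Copy the contents of the physically contiguous backing object back into
 * the object's shmem pages and detach the phys object from it.
 */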
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;
	int i;
	int ret;
	int page_count;

	obj_priv = to_intel_bo(obj);
	if (!obj_priv->phys_obj)
		return;

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret)
		goto out;

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *dst = kmap_atomic(obj_priv->pages[i]);
		char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst);
	}
	drm_clflush_pages(obj_priv->pages, page_count);
	drm_agp_chipset_flush(dev);

	i915_gem_object_put_pages(obj);
out:
	obj_priv->phys_obj->cur_obj = NULL;
	obj_priv->phys_obj = NULL;
}

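/* Back a GEM object with one of the driver's physically contiguous objects,
 * creating it on first use, and copy the current page contents into it.
 */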
int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_gem_object *obj,
			    int id,
			    int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	obj_priv = to_intel_bo(obj);

	if (obj_priv->phys_obj) {
		if (obj_priv->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
			goto out;
		}
	}

	/* bind to the object */
	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj_priv->phys_obj->cur_obj = obj;

	ret = i915_gem_object_get_pages(obj, 0);
	if (ret) {
		DRM_ERROR("failed to get page list\n");
		goto out;
	}

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *src = kmap_atomic(obj_priv->pages[i]);
		char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src);
	}

	i915_gem_object_put_pages(obj);

	return 0;
out:
	return ret;
}

static int
i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	void *obj_addr;
	int ret;
	char __user *user_data;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;

	DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
	ret = copy_from_user(obj_addr, user_data, args->size);
	if (ret)
		return -EFAULT;

	drm_agp_chipset_flush(dev);
	return 0;
}
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}
static int
i915_gpu_is_active(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int lists_empty;

	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
		      list_empty(&dev_priv->mm.active_list);

	return !lists_empty;
}

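/* Shrinker callback: with nr_to_scan == 0 just report how many inactive
 * objects could be freed; otherwise unbind purgeable buffers first, then any
 * inactive buffers, and as a last resort idle the GPU and rescan.
 */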
static int
i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
	drm_i915_private_t *dev_priv, *next_dev;
	struct drm_i915_gem_object *obj_priv, *next_obj;
	int cnt = 0;
	int would_deadlock = 1;

	/* "fast-path" to count number of available objects */
	if (nr_to_scan == 0) {
		spin_lock(&shrink_list_lock);
		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
			struct drm_device *dev = dev_priv->dev;

			if (mutex_trylock(&dev->struct_mutex)) {
				list_for_each_entry(obj_priv,
						    &dev_priv->mm.inactive_list,
						    mm_list)
					cnt++;
				mutex_unlock(&dev->struct_mutex);
			}
		}
		spin_unlock(&shrink_list_lock);

		return (cnt / 100) * sysctl_vfs_cache_pressure;
	}

	spin_lock(&shrink_list_lock);

rescan:
	/* first scan for clean buffers */
	list_for_each_entry_safe(dev_priv, next_dev,
				 &shrink_list, mm.shrink_list) {
		struct drm_device *dev = dev_priv->dev;

		if (!mutex_trylock(&dev->struct_mutex))
			continue;

		spin_unlock(&shrink_list_lock);
		i915_gem_retire_requests(dev);

		list_for_each_entry_safe(obj_priv, next_obj,
					 &dev_priv->mm.inactive_list,
					 mm_list) {
			if (i915_gem_object_is_purgeable(obj_priv)) {
				i915_gem_object_unbind(&obj_priv->base);
				if (--nr_to_scan <= 0)
					break;
			}
		}

		spin_lock(&shrink_list_lock);
		mutex_unlock(&dev->struct_mutex);

		would_deadlock = 0;

		if (nr_to_scan <= 0)
			break;
	}

	/* second pass, evict/count anything still on the inactive list */
	list_for_each_entry_safe(dev_priv, next_dev,
				 &shrink_list, mm.shrink_list) {
		struct drm_device *dev = dev_priv->dev;

		if (!mutex_trylock(&dev->struct_mutex))
			continue;

		spin_unlock(&shrink_list_lock);

		list_for_each_entry_safe(obj_priv, next_obj,
					 &dev_priv->mm.inactive_list,
					 mm_list) {
			if (nr_to_scan > 0) {
				i915_gem_object_unbind(&obj_priv->base);
				nr_to_scan--;
			} else
				cnt++;
		}

		spin_lock(&shrink_list_lock);
		mutex_unlock(&dev->struct_mutex);

		would_deadlock = 0;
	}

	if (nr_to_scan) {
		int active = 0;

		/*
		 * We are desperate for pages, so as a last resort, wait
		 * for the GPU to finish and discard whatever we can.
		 * This dramatically reduces the number of OOM-killer events
		 * whilst running the GPU aggressively.
		 */
		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
			struct drm_device *dev = dev_priv->dev;

			if (!mutex_trylock(&dev->struct_mutex))
				continue;

			spin_unlock(&shrink_list_lock);

			if (i915_gpu_is_active(dev)) {
				i915_gpu_idle(dev);
				active++;
			}

			spin_lock(&shrink_list_lock);
			mutex_unlock(&dev->struct_mutex);
		}

		if (active)
			goto rescan;
	}

	spin_unlock(&shrink_list_lock);

	if (would_deadlock)
		return -1;
	else if (cnt > 0)
		return (cnt / 100) * sysctl_vfs_cache_pressure;
	else
		return 0;
}

static struct shrinker shrinker = {
	.shrink = i915_gem_shrink,
	.seeks = DEFAULT_SEEKS,
};

__init void
i915_gem_shrinker_init(void)
{
    register_shrinker(&shrinker);
}

__exit void
i915_gem_shrinker_exit(void)
{
    unregister_shrinker(&shrinker);
}