/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>

static void i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
					     bool write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
						      uint64_t offset,
						      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
				       unsigned alignment,
				       bool map_and_fenceable);
static void i915_gem_clear_fence_reg(struct drm_device *dev,
				     struct drm_i915_fence_reg *reg);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);
static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
				    int nr_to_scan,
				    gfp_t gfp_mask);

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
}

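/* Check whether the GPU is wedged. If a reset is in progress, wait for it
 * to complete; return 0 once the GPU has been successfully reset, or -EIO
 * if it remains hung (terminally wedged).
 */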
int
i915_gem_check_is_wedged(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct completion *x = &dev_priv->error_completion;
	unsigned long flags;
	int ret;

	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	ret = wait_for_completion_interruptible(x);
	if (ret)
		return ret;

	/* Success, we reset the GPU! */
	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	/* GPU is hung, bump the completion count to account for
	 * the token we just consumed so that we never hit zero and
	 * end up waiting upon a subsequent completion event that
	 * will never happen.
	 */
	spin_lock_irqsave(&x->wait.lock, flags);
	x->done++;
	spin_unlock_irqrestore(&x->wait.lock, flags);
	return -EIO;
}

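/* Acquire dev->struct_mutex after checking that the GPU is not wedged.
 * Returns 0 on success, or a negative error code if interrupted or if the
 * GPU is hung.
 */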
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_check_is_wedged(dev);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (atomic_read(&dev_priv->mm.wedged)) {
		mutex_unlock(&dev->struct_mutex);
		return -EAGAIN;
	}

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return obj->gtt_space && !obj->active && obj->pin_count == 0;
}

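/* Set up the GTT address space: initialise the drm_mm range allocator over
 * [start, end) and record the total and CPU-mappable aperture sizes.
 */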
void i915_gem_do_init(struct drm_device *dev,
		      unsigned long start,
		      unsigned long mappable_end,
		      unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev_priv->mm.gtt_total = end - start;
	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
	dev_priv->mm.gtt_mappable_end = mappable_end;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_init *args = data;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);
	i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
		pinned += obj->gtt_space->size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->mm.gtt_total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
		kfree(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference(&obj->base);
	trace_i915_gem_object_create(obj);

	args->handle = handle;
	return 0;
}

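/* Tiled objects on machines using the 9/10/17 swizzle mode need manual
 * bit-17 swizzling of their backing pages when accessed through the CPU.
 */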
static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

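/* Copy a range of bytes between two shmem backing pages via temporary
 * kernel mappings.
 */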
static inline void
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap(dst_page);
	src_vaddr = kmap(src_page);

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap(src_page);
	kunmap(dst_page);
}

static inline void
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap(gpu_page);
	cpu_vaddr = kmap(cpu_page);

	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}
		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap(cpu_page);
	kunmap(gpu_page);
}

/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space.  On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev,
			  struct drm_i915_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = args->offset;

	while (remain > 0) {
		struct page *page;
		char *vaddr;
		int ret;

		/* Operation in this page
		 *
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap_atomic(page);
		ret = __copy_to_user_inatomic(user_data,
					      vaddr + page_offset,
					      page_length);
		kunmap_atomic(vaddr);

		mark_page_accessed(page);
		page_cache_release(page);
		if (ret)
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

/**
 * This is the fallback shmem pread path, which allocates temporary storage
 * in kernel space to copy_to_user into outside of the struct_mutex, so we
 * can copy out of the object's backing pages while holding the struct mutex
 * and not take page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev,
			  struct drm_i915_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_set_cpu_read_domain_range(obj,
							args->offset,
							args->size);
	if (ret)
		goto out;

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	offset = args->offset;

	while (remain > 0) {
		struct page *page;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(page,
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      1);
		} else {
			slow_shmem_copy(user_pages[data_page_index],
					data_page_offset,
					page,
					shmem_page_offset,
					page_length);
		}

		mark_page_accessed(page);
		page_cache_release(page);

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

out:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		mark_page_accessed(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	drm_free_large(user_pages);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
				       args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source.  */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	ret = i915_gem_object_set_cpu_read_domain_range(obj,
							args->offset,
							args->size);
	if (ret)
		goto out;

	ret = -EFAULT;
	if (!i915_gem_object_needs_bit17_swizzle(obj))
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
	if (ret == -EFAULT)
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline void
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char __iomem *dst_vaddr;
	char *src_vaddr;

	dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
	src_vaddr = kmap(user_page);

	memcpy_toio(dst_vaddr + gtt_offset,
		    src_vaddr + user_offset,
		    length);

	kunmap(user_page);
	io_mapping_unmap(dst_vaddr);
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = obj->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				    page_offset, user_data, page_length))

			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin_pages;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin_pages;

	offset = obj->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		slow_kernel_write(dev_priv->mm.gtt_mapping,
				  gtt_page_base, gtt_page_offset,
				  user_pages[data_page_index],
				  data_page_offset,
				  page_length);

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = args->offset;
	obj->dirty = 1;

	while (remain > 0) {
		struct page *page;
		char *vaddr;
		int ret;

		/* Operation in this page
		 *
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap_atomic(page, KM_USER0);
		ret = __copy_from_user_inatomic(vaddr + page_offset,
						user_data,
						page_length);
		kunmap_atomic(vaddr, KM_USER0);

		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret)
		goto out;

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	offset = args->offset;
	obj->dirty = 1;

	while (remain > 0) {
		struct page *page;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(page,
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      0);
		} else {
			slow_shmem_copy(page,
					shmem_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);
		}

		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

out:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
				      args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
	else if (obj->gtt_space &&
		 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_object_pin(obj, 0, true);
		if (ret)
			goto out;

		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			goto out_unpin;

		ret = i915_gem_object_put_fence(obj);
		if (ret)
			goto out_unpin;

		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		if (ret == -EFAULT)
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);

out_unpin:
		i915_gem_object_unpin(obj);
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
		if (ret)
			goto out;

		ret = -EFAULT;
		if (!i915_gem_object_needs_bit17_swizzle(obj))
			ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
		if (ret == -EFAULT)
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	if (obj->size > dev_priv->mm.gtt_mappable_end) {
		drm_gem_object_unreference_unlocked(obj);
		return -E2BIG;
	}

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	/* Now bind it into the GTT if needed */
	mutex_lock(&dev->struct_mutex);

	if (!obj->map_and_fenceable) {
		ret = i915_gem_object_unbind(obj);
		if (ret)
			goto unlock;
	}
	if (!obj->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0, true);
		if (ret)
			goto unlock;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto unlock;

	if (obj->tiling_mode == I915_TILING_NONE)
		ret = i915_gem_object_put_fence(obj);
	else
		ret = i915_gem_object_get_fence(obj, NULL, true);
	if (ret)
		goto unlock;

	if (i915_gem_object_is_inactive(obj))
		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	obj->fault_mappable = true;

	pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case -EAGAIN:
		set_need_resched();
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->base.map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->base.size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->base.size / PAGE_SIZE,
						    0, 0);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n",
			  obj->base.name);
		ret = -ENOSPC;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->base.size / PAGE_SIZE,
						  0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);
	list->map = NULL;

	return ret;
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, than pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	if (!obj->fault_mappable)
		return;

	unmap_mapping_range(obj->base.dev->dev_mapping,
			    (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
			    obj->base.size, 1);

	obj->fault_mappable = false;
}

static void
i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list = &obj->base.map_list;

	drm_ht_remove_item(&mm->offset_hash, &list->hash);
	drm_mm_put_block(list->file_offset_node);
	kfree(list->map);
	list->map = NULL;
}

static uint32_t
i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t size;

	if (INTEL_INFO(dev)->gen >= 4 ||
	    obj->tiling_mode == I915_TILING_NONE)
		return obj->base.size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (INTEL_INFO(dev)->gen == 3)
		size = 1024*1024;
	else
		size = 512*1024;

	while (size < obj->base.size)
		size <<= 1;

	return size;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_INFO(dev)->gen >= 4 ||
	    obj->tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_gtt_size(obj);
}

/**
 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
 *					 unfenced object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, only taking into account
 * unfenced tiled surface requirements.
 */
static uint32_t
i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	int tile_height;

	/*
	 * Minimum alignment is 4k (GTT page size) for sane hw.
	 */
	if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
	    obj->tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Older chips need unfenced tiled buffers to be aligned to the left
	 * edge of an even tile row (where tile rows are counted as if the bo is
	 * placed in a fenced gtt region).
	 */
	if (IS_GEN2(dev) ||
	    (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
		tile_height = 32;
	else
		tile_height = 8;

	return tile_height * obj->stride * 2;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_mmap_gtt *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
		ret = -E2BIG;
		goto unlock;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
		ret = -EINVAL;
		goto out;
	}

	if (!obj->base.map_list.map) {
		ret = i915_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

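/* Pin the object's shmem backing pages into obj->pages, applying bit-17
 * swizzle fixups for tiled objects where required.
 */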
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
			      gfp_t gfpmask)
{
	int page_count, i;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->base.size / PAGE_SIZE;
	BUG_ON(obj->pages != NULL);
	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
	if (obj->pages == NULL)
		return -ENOMEM;

	inode = obj->base.filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	for (i = 0; i < page_count; i++) {
		page = read_cache_page_gfp(mapping, i,
					   GFP_HIGHUSER |
					   __GFP_COLD |
					   __GFP_RECLAIMABLE |
					   gfpmask);
		if (IS_ERR(page))
			goto err_pages;

		obj->pages[i] = page;
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		i915_gem_object_do_bit_17_swizzle(obj);

	return 0;

err_pages:
	while (i--)
		page_cache_release(obj->pages[i]);

	drm_free_large(obj->pages);
	obj->pages = NULL;
	return PTR_ERR(page);
}

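/* Release the backing pages acquired by i915_gem_object_get_pages_gtt(),
 * writing back dirty pages unless the object has been marked as
 * dontneed or already purged.
 */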
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int i;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	if (obj->tiling_mode != I915_TILING_NONE)
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	for (i = 0; i < page_count; i++) {
		if (obj->dirty)
			set_page_dirty(obj->pages[i]);

		if (obj->madv == I915_MADV_WILLNEED)
			mark_page_accessed(obj->pages[i]);

		page_cache_release(obj->pages[i]);
	}
	obj->dirty = 0;

	drm_free_large(obj->pages);
	obj->pages = NULL;
}

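/* Mark the object as in use by the GPU on the given ring: take a reference
 * if it is newly active, move it onto the active lists and record the seqno
 * (and fence state) of its last rendering request.
 */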
void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
			       struct intel_ring_buffer *ring,
			       u32 seqno)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	BUG_ON(ring == NULL);
	obj->ring = ring;

	/* Add a reference if we're newly entering the active list. */
	if (!obj->active) {
		drm_gem_object_reference(&obj->base);
		obj->active = 1;
	}

	/* Move from whatever list we were on to the tail of execution. */
	list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
	list_move_tail(&obj->ring_list, &ring->active_list);

	obj->last_rendering_seqno = seqno;
	if (obj->fenced_gpu_access) {
		struct drm_i915_fence_reg *reg;

		BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);

		obj->last_fenced_seqno = seqno;
		obj->last_fenced_ring = ring;

		reg = &dev_priv->fence_regs[obj->fence_reg];
		list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
	}
}

static void
i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
{
	list_del_init(&obj->ring_list);
	obj->last_rendering_seqno = 0;
}

static void
i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	BUG_ON(!obj->active);
	list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);

	i915_gem_object_move_off_active(obj);
}

static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (obj->pin_count != 0)
		list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
	else
		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	BUG_ON(!list_empty(&obj->gpu_write_list));
	BUG_ON(!obj->active);
	obj->ring = NULL;

	i915_gem_object_move_off_active(obj);
	obj->fenced_gpu_access = false;

	obj->active = 0;
	obj->pending_gpu_write = false;
	drm_gem_object_unreference(&obj->base);

	WARN_ON(i915_verify_lists(dev));
}

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	struct inode *inode;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*. Here we mirror the actions taken
	 * by shmem_delete_inode() to release the backing store.
	 */
	inode = obj->base.filp->f_path.dentry->d_inode;
	truncate_inode_pages(inode->i_mapping, 0);
	if (inode->i_op->truncate_range)
		inode->i_op->truncate_range(inode, 0, (loff_t)-1);

	obj->madv = __I915_MADV_PURGED;
}

static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{
	return obj->madv == I915_MADV_DONTNEED;
}

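/* Retire a ring's gpu_write_list after a flush: every object whose write
 * domain was flushed is moved to the active list, tagged with the seqno of
 * the upcoming request.
 */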
static void
i915_gem_process_flushing_list(struct drm_device *dev,
			       uint32_t flush_domains,
			       struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj, *next;

	list_for_each_entry_safe(obj, next,
				 &ring->gpu_write_list,
				 gpu_write_list) {
		if (obj->base.write_domain & flush_domains) {
			uint32_t old_write_domain = obj->base.write_domain;

			obj->base.write_domain = 0;
			list_del_init(&obj->gpu_write_list);
			i915_gem_object_move_to_active(obj, ring,
						       i915_gem_next_request_seqno(dev, ring));

			trace_i915_gem_object_change_domain(obj,
							    obj->base.read_domains,
							    old_write_domain);
		}
	}
}

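/* Emit a request on the ring and queue it for retirement, optionally
 * associating it with the submitting file and (re)arming the hangcheck
 * timer and the retire worker.
 */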
int
i915_add_request(struct drm_device *dev,
		 struct drm_file *file,
		 struct drm_i915_gem_request *request,
		 struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = NULL;
	uint32_t seqno;
	int was_empty;
	int ret;

	BUG_ON(request == NULL);

	if (file != NULL)
		file_priv = file->driver_priv;

	ret = ring->add_request(ring, &seqno);
	if (ret)
		return ret;

	ring->outstanding_lazy_request = false;

	request->seqno = seqno;
	request->ring = ring;
	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&ring->request_list);
	list_add_tail(&request->list, &ring->request_list);

	if (file_priv) {
		spin_lock(&file_priv->mm.lock);
		request->file_priv = file_priv;
		list_add_tail(&request->client_list,
			      &file_priv->mm.request_list);
		spin_unlock(&file_priv->mm.lock);
	}

	if (!dev_priv->mm.suspended) {
		mod_timer(&dev_priv->hangcheck_timer,
			  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
		if (was_empty)
			queue_delayed_work(dev_priv->wq,
					   &dev_priv->mm.retire_work, HZ);
	}
	return 0;
}

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	list_del(&request->client_list);
	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
}

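/* Throw away all outstanding requests on a ring after a GPU reset and move
 * its active objects back to the inactive list.
 */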
static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
				      struct intel_ring_buffer *ring)
{
	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		list_del(&request->list);
		i915_gem_request_remove_from_client(request);
		kfree(request);
	}

	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&ring->active_list,
				       struct drm_i915_gem_object,
				       ring_list);

		obj->base.write_domain = 0;
		list_del_init(&obj->gpu_write_list);
		i915_gem_object_move_to_inactive(obj);
	}
}

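/* Invalidate all fence registers after a GPU reset, dropping CPU mappings
 * of tiled objects so that stale fences cannot be reused.
 */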
static void i915_gem_reset_fences(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < 16; i++) {
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
		struct drm_i915_gem_object *obj = reg->obj;

		if (!obj)
			continue;

		if (obj->tiling_mode)
			i915_gem_release_mmap(obj);

		reg->obj->fence_reg = I915_FENCE_REG_NONE;
		reg->obj->fenced_gpu_access = false;
		reg->obj->last_fenced_seqno = 0;
		reg->obj->last_fenced_ring = NULL;
		i915_gem_clear_fence_reg(dev, reg);
	}
}

void i915_gem_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int i;

	for (i = 0; i < I915_NUM_RINGS; i++)
		i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);

	/* Remove anything from the flushing lists. The GPU cache is likely
	 * to be lost on reset along with the data, so simply move the
	 * lost bo to the inactive list.
	 */
	while (!list_empty(&dev_priv->mm.flushing_list)) {
		obj = list_first_entry(&dev_priv->mm.flushing_list,
				      struct drm_i915_gem_object,
				      mm_list);

		obj->base.write_domain = 0;
		list_del_init(&obj->gpu_write_list);
		i915_gem_object_move_to_inactive(obj);
	}

	/* Move everything out of the GPU domains to ensure we do any
	 * necessary invalidation upon reuse.
	 */
	list_for_each_entry(obj,
			    &dev_priv->mm.inactive_list,
			    mm_list)
	{
		obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
	}

	/* The fence registers are invalidated so clear them out */
	i915_gem_reset_fences(dev);
}

/**
 * This function clears the request list as sequence numbers are passed.
 */
static void
i915_gem_retire_requests_ring(struct drm_device *dev,
			      struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;
	int i;

	if (!ring->status_page.page_addr ||
	    list_empty(&ring->request_list))
		return;

	WARN_ON(i915_verify_lists(dev));

	seqno = ring->get_seqno(ring);

	for (i = 0; i < I915_NUM_RINGS; i++)
		if (seqno >= ring->sync_seqno[i])
			ring->sync_seqno[i] = 0;

	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		if (!i915_seqno_passed(seqno, request->seqno))
			break;

		trace_i915_gem_request_retire(dev, request->seqno);

		list_del(&request->list);
		i915_gem_request_remove_from_client(request);
		kfree(request);
	}

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&ring->active_list,
				       struct drm_i915_gem_object,
				       ring_list);

		if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
			break;

		if (obj->base.write_domain != 0)
			i915_gem_object_move_to_flushing(obj);
		else
			i915_gem_object_move_to_inactive(obj);
	}

	if (unlikely (dev_priv->trace_irq_seqno &&
		      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
		ring->irq_put(ring);
		dev_priv->trace_irq_seqno = 0;
	}

	WARN_ON(i915_verify_lists(dev));
}

void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	if (!list_empty(&dev_priv->mm.deferred_free_list)) {
	    struct drm_i915_gem_object *obj, *next;

	    /* We must be careful that during unbind() we do not
	     * accidentally infinitely recurse into retire requests.
	     * Currently:
	     *   retire -> free -> unbind -> wait -> retire_ring
	     */
	    list_for_each_entry_safe(obj, next,
				     &dev_priv->mm.deferred_free_list,
				     mm_list)
		    i915_gem_free_object_tail(obj);
	}

	for (i = 0; i < I915_NUM_RINGS; i++)
		i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]);
}

static void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	/* Come back later if the device is busy... */
	if (!mutex_trylock(&dev->struct_mutex)) {
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
		return;
	}

	i915_gem_retire_requests(dev);

	if (!dev_priv->mm.suspended &&
		(!list_empty(&dev_priv->ring[RCS].request_list) ||
		 !list_empty(&dev_priv->ring[VCS].request_list) ||
		 !list_empty(&dev_priv->ring[BCS].request_list)))
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}

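/**
 * Waits for @seqno to pass on @ring, emitting a request for the outstanding
 * lazy seqno if necessary and, on success, retiring any requests that have
 * completed in the meantime.
 */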
int
i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
		     bool interruptible, struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 ier;
	int ret = 0;

	BUG_ON(seqno == 0);

	if (atomic_read(&dev_priv->mm.wedged))
		return -EAGAIN;

	if (seqno == ring->outstanding_lazy_request) {
		struct drm_i915_gem_request *request;

		request = kzalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return -ENOMEM;

		ret = i915_add_request(dev, NULL, request, ring);
		if (ret) {
			kfree(request);
			return ret;
		}

		seqno = request->seqno;
	}

	if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
		if (HAS_PCH_SPLIT(dev))
			ier = I915_READ(DEIER) | I915_READ(GTIER);
		else
			ier = I915_READ(IER);
		if (!ier) {
			DRM_ERROR("something (likely vbetool) disabled "
				  "interrupts, re-enabling\n");
			i915_driver_irq_preinstall(dev);
			i915_driver_irq_postinstall(dev);
		}

		trace_i915_gem_request_wait_begin(dev, seqno);

		ring->waiting_seqno = seqno;
		ring->irq_get(ring);
		if (interruptible)
			ret = wait_event_interruptible(ring->irq_queue,
				i915_seqno_passed(ring->get_seqno(ring), seqno)
				|| atomic_read(&dev_priv->mm.wedged));
		else
			wait_event(ring->irq_queue,
				i915_seqno_passed(ring->get_seqno(ring), seqno)
				|| atomic_read(&dev_priv->mm.wedged));

		ring->irq_put(ring);
		ring->waiting_seqno = 0;

		trace_i915_gem_request_wait_end(dev, seqno);
	}
	if (atomic_read(&dev_priv->mm.wedged))
		ret = -EAGAIN;

	if (ret && ret != -ERESTARTSYS)
		DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
			  __func__, ret, seqno, ring->get_seqno(ring),
			  dev_priv->next_seqno);

	/* Directly dispatch request retiring.  While we have the work queue
	 * to handle this, the waiter on a request often wants an associated
	 * buffer to have made it to the inactive list, and we would need
	 * a separate wait queue to handle that.
	 */
	if (ret == 0)
		i915_gem_retire_requests_ring(dev, ring);

	return ret;
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno,
		  struct intel_ring_buffer *ring)
{
	return i915_do_wait_request(dev, seqno, 1, ring);
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool interruptible)
{
	struct drm_device *dev = obj->base.dev;
	int ret;

	/* This function only exists to support waiting for existing rendering,
	 * not for emitting required flushes.
	 */
	BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);

	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
	if (obj->active) {
		ret = i915_do_wait_request(dev,
					   obj->last_rendering_seqno,
					   interruptible,
					   obj->ring);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * Unbinds an object from the GTT aperture.
 */
int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	int ret = 0;

	if (obj->gtt_space == NULL)
		return 0;

	if (obj->pin_count != 0) {
		DRM_ERROR("Attempting to unbind pinned buffer\n");
		return -EINVAL;
	}

	/* blow away mappings if mapped through GTT */
	i915_gem_release_mmap(obj);

	/* Move the object to the CPU domain to ensure that
	 * any possible CPU writes while it's not in the GTT
	 * are flushed when we go to remap it. This will
	 * also ensure that all pending GPU writes are finished
	 * before we unbind.
	 */
	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret == -ERESTARTSYS)
		return ret;
	/* Continue on if we fail due to EIO, the GPU is hung so we
	 * should be safe and we need to cleanup or else we might
	 * cause memory corruption through use-after-free.
	 */
	if (ret) {
		i915_gem_clflush_object(obj);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	/* release the fence reg _after_ flushing */
	ret = i915_gem_object_put_fence(obj);
	if (ret == -ERESTARTSYS)
		return ret;

	i915_gem_gtt_unbind_object(obj);
	i915_gem_object_put_pages_gtt(obj);

	list_del_init(&obj->gtt_list);
	list_del_init(&obj->mm_list);
	/* Avoid an unnecessary call to unbind on rebind. */
	obj->map_and_fenceable = true;

	drm_mm_put_block(obj->gtt_space);
	obj->gtt_space = NULL;
	obj->gtt_offset = 0;

	if (i915_gem_object_is_purgeable(obj))
		i915_gem_object_truncate(obj);

	trace_i915_gem_object_unbind(obj);

	return ret;
}

void
i915_gem_flush_ring(struct drm_device *dev,
		    struct intel_ring_buffer *ring,
		    uint32_t invalidate_domains,
		    uint32_t flush_domains)
{
	ring->flush(ring, invalidate_domains, flush_domains);
	i915_gem_process_flushing_list(dev, flush_domains, ring);
}

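/* Flush all outstanding GPU writes on @ring and wait for it to drain. */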
static int i915_ring_idle(struct drm_device *dev,
			  struct intel_ring_buffer *ring)
{
	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
		return 0;

	i915_gem_flush_ring(dev, ring,
			    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	return i915_wait_request(dev,
				 i915_gem_next_request_seqno(dev, ring),
				 ring);
}

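/* Flush and wait for every ring to go completely idle. */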
int
i915_gpu_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool lists_empty;
	int ret, i;

	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->mm.active_list));
	if (lists_empty)
		return 0;

	/* Flush everything onto the inactive list. */
	for (i = 0; i < I915_NUM_RINGS; i++) {
		ret = i915_ring_idle(dev, &dev_priv->ring[i]);
		if (ret)
			return ret;
	}

	return 0;
}

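/*
 * The *_write_fence_reg() helpers below program a single fence register for
 * the given object, either directly via MMIO or, when @pipelined is
 * non-NULL, via MI_LOAD_REGISTER_IMM commands emitted on that ring.
 */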
static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
				       struct intel_ring_buffer *pipelined)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 size = obj->gtt_space->size;
	int regnum = obj->fence_reg;
	uint64_t val;

	val = (uint64_t)((obj->gtt_offset + size - 4096) &
			 0xfffff000) << 32;
	val |= obj->gtt_offset & 0xfffff000;
	val |= (uint64_t)((obj->stride / 128) - 1) <<
		SANDYBRIDGE_FENCE_PITCH_SHIFT;

	if (obj->tiling_mode == I915_TILING_Y)
		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
	val |= I965_FENCE_REG_VALID;

	if (pipelined) {
		int ret = intel_ring_begin(pipelined, 6);
		if (ret)
			return ret;

		intel_ring_emit(pipelined, MI_NOOP);
		intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
		intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
		intel_ring_emit(pipelined, (u32)val);
		intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
		intel_ring_emit(pipelined, (u32)(val >> 32));
		intel_ring_advance(pipelined);
	} else
		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);

	return 0;
}

static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
				struct intel_ring_buffer *pipelined)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 size = obj->gtt_space->size;
	int regnum = obj->fence_reg;
	uint64_t val;

	val = (uint64_t)((obj->gtt_offset + size - 4096) &
		    0xfffff000) << 32;
	val |= obj->gtt_offset & 0xfffff000;
	val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
	if (obj->tiling_mode == I915_TILING_Y)
		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
	val |= I965_FENCE_REG_VALID;

	if (pipelined) {
		int ret = intel_ring_begin(pipelined, 6);
		if (ret)
			return ret;

		intel_ring_emit(pipelined, MI_NOOP);
		intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
		intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
		intel_ring_emit(pipelined, (u32)val);
		intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
		intel_ring_emit(pipelined, (u32)(val >> 32));
		intel_ring_advance(pipelined);
	} else
		I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);

	return 0;
}

static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
				struct intel_ring_buffer *pipelined)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 size = obj->gtt_space->size;
	u32 fence_reg, val, pitch_val;
	int tile_width;

	if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
		 (size & -size) != size ||
		 (obj->gtt_offset & (size - 1)),
		 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
		 obj->gtt_offset, obj->map_and_fenceable, size))
		return -EINVAL;

	if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
		tile_width = 128;
	else
		tile_width = 512;

	/* Note: pitch better be a power of two tile widths */
	pitch_val = obj->stride / tile_width;
	pitch_val = ffs(pitch_val) - 1;

	val = obj->gtt_offset;
	if (obj->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	val |= I915_FENCE_SIZE_BITS(size);
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	fence_reg = obj->fence_reg;
	if (fence_reg < 8)
		fence_reg = FENCE_REG_830_0 + fence_reg * 4;
	else
		fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;

	if (pipelined) {
		int ret = intel_ring_begin(pipelined, 4);
		if (ret)
			return ret;

		intel_ring_emit(pipelined, MI_NOOP);
		intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(pipelined, fence_reg);
		intel_ring_emit(pipelined, val);
		intel_ring_advance(pipelined);
	} else
		I915_WRITE(fence_reg, val);

	return 0;
}

static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
				struct intel_ring_buffer *pipelined)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 size = obj->gtt_space->size;
	int regnum = obj->fence_reg;
	uint32_t val;
	uint32_t pitch_val;

	if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
		 (size & -size) != size ||
		 (obj->gtt_offset & (size - 1)),
		 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
		 obj->gtt_offset, size))
		return -EINVAL;

	pitch_val = obj->stride / 128;
	pitch_val = ffs(pitch_val) - 1;

	val = obj->gtt_offset;
	if (obj->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	val |= I830_FENCE_SIZE_BITS(size);
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	if (pipelined) {
		int ret = intel_ring_begin(pipelined, 4);
		if (ret)
			return ret;

		intel_ring_emit(pipelined, MI_NOOP);
		intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
		intel_ring_emit(pipelined, val);
		intel_ring_advance(pipelined);
	} else
		I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);

	return 0;
}

static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
	return i915_seqno_passed(ring->get_seqno(ring), seqno);
}

static int
i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
			    struct intel_ring_buffer *pipelined,
			    bool interruptible)
{
	int ret;

	if (obj->fenced_gpu_access) {
		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
			i915_gem_flush_ring(obj->base.dev,
					    obj->last_fenced_ring,
					    0, obj->base.write_domain);

		obj->fenced_gpu_access = false;
	}

	if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
		if (!ring_passed_seqno(obj->last_fenced_ring,
				       obj->last_fenced_seqno)) {
			ret = i915_do_wait_request(obj->base.dev,
						   obj->last_fenced_seqno,
						   interruptible,
						   obj->last_fenced_ring);
			if (ret)
				return ret;
		}

		obj->last_fenced_seqno = 0;
		obj->last_fenced_ring = NULL;
	}

	return 0;
}

int
i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
{
	int ret;

	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	ret = i915_gem_object_flush_fence(obj, NULL, true);
	if (ret)
		return ret;

	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		i915_gem_clear_fence_reg(obj->base.dev,
					 &dev_priv->fence_regs[obj->fence_reg]);

		obj->fence_reg = I915_FENCE_REG_NONE;
	}

	return 0;
}

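/*
 * Pick a fence register for reuse: prefer a completely free one, otherwise
 * steal the least-recently-used register whose object is not pinned,
 * preferring one last fenced on @pipelined (or on no ring at all).
 */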
static struct drm_i915_fence_reg *
i915_find_fence_reg(struct drm_device *dev,
		    struct intel_ring_buffer *pipelined)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_fence_reg *reg, *first, *avail;
	int i;

	/* First try to find a free reg */
	avail = NULL;
	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
		reg = &dev_priv->fence_regs[i];
		if (!reg->obj)
			return reg;

		if (!reg->obj->pin_count)
			avail = reg;
	}

	if (avail == NULL)
		return NULL;

	/* None available, try to steal one or wait for a user to finish */
	avail = first = NULL;
	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
		if (reg->obj->pin_count)
			continue;

		if (first == NULL)
			first = reg;

		if (!pipelined ||
		    !reg->obj->last_fenced_ring ||
		    reg->obj->last_fenced_ring == pipelined) {
			avail = reg;
			break;
		}
	}

	if (avail == NULL)
		avail = first;

	return avail;
}

/**
 * i915_gem_object_get_fence - set up a fence reg for an object
 * @obj: object to map through a fence reg
 * @pipelined: ring on which to queue the change, or NULL for CPU access
 * @interruptible: may we sleep interruptibly while waiting for an old
 *                 fence register to retire?
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 *
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 */
int
i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
			  struct intel_ring_buffer *pipelined,
			  bool interruptible)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_fence_reg *reg;
	int ret;

	/* Just update our place in the LRU if our fence is getting reused. */
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		reg = &dev_priv->fence_regs[obj->fence_reg];
		list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);

		if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
			pipelined = NULL;

		if (!pipelined) {
			if (reg->setup_seqno) {
				if (!ring_passed_seqno(obj->last_fenced_ring,
						       reg->setup_seqno)) {
					ret = i915_do_wait_request(obj->base.dev,
								   reg->setup_seqno,
								   interruptible,
								   obj->last_fenced_ring);
					if (ret)
						return ret;
				}

				reg->setup_seqno = 0;
			}
		} else if (obj->last_fenced_ring &&
			   obj->last_fenced_ring != pipelined) {
			ret = i915_gem_object_flush_fence(obj,
							  pipelined,
							  interruptible);
			if (ret)
				return ret;
		} else if (obj->tiling_changed) {
			if (obj->fenced_gpu_access) {
				if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
					i915_gem_flush_ring(obj->base.dev, obj->ring,
							    0, obj->base.write_domain);

				obj->fenced_gpu_access = false;
			}
		}

		if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
			pipelined = NULL;
		BUG_ON(!pipelined && reg->setup_seqno);

		if (obj->tiling_changed) {
			if (pipelined) {
				reg->setup_seqno =
					i915_gem_next_request_seqno(dev, pipelined);
				obj->last_fenced_seqno = reg->setup_seqno;
				obj->last_fenced_ring = pipelined;
			}
			goto update;
		}

		return 0;
	}

	reg = i915_find_fence_reg(dev, pipelined);
	if (reg == NULL)
		return -ENOSPC;

	ret = i915_gem_object_flush_fence(obj, pipelined, interruptible);
	if (ret)
		return ret;

	if (reg->obj) {
		struct drm_i915_gem_object *old = reg->obj;

		drm_gem_object_reference(&old->base);

		if (old->tiling_mode)
			i915_gem_release_mmap(old);

		/* XXX The pipelined change over appears to be incoherent. */
		ret = i915_gem_object_flush_fence(old,
						  NULL, //pipelined,
						  interruptible);
		if (ret) {
			drm_gem_object_unreference(&old->base);
			return ret;
		}

		if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
			pipelined = NULL;

		old->fence_reg = I915_FENCE_REG_NONE;
		old->last_fenced_ring = pipelined;
		old->last_fenced_seqno =
			pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;

		drm_gem_object_unreference(&old->base);
	} else if (obj->last_fenced_seqno == 0)
		pipelined = NULL;

	reg->obj = obj;
	list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
	obj->fence_reg = reg - dev_priv->fence_regs;
	obj->last_fenced_ring = pipelined;

	reg->setup_seqno =
		pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
	obj->last_fenced_seqno = reg->setup_seqno;

update:
	obj->tiling_changed = false;
	switch (INTEL_INFO(dev)->gen) {
	case 6:
		ret = sandybridge_write_fence_reg(obj, pipelined);
		break;
	case 5:
	case 4:
		ret = i965_write_fence_reg(obj, pipelined);
		break;
	case 3:
		ret = i915_write_fence_reg(obj, pipelined);
		break;
	case 2:
		ret = i830_write_fence_reg(obj, pipelined);
		break;
	}

	return ret;
}

/**
 * i915_gem_clear_fence_reg - clear out fence register info
 * @dev: DRM device
 * @reg: fence register to clear
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and the owning object.
 */
static void
i915_gem_clear_fence_reg(struct drm_device *dev,
			 struct drm_i915_fence_reg *reg)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t fence_reg = reg - dev_priv->fence_regs;

	switch (INTEL_INFO(dev)->gen) {
	case 6:
		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
		break;
	case 5:
	case 4:
		I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
		break;
	case 3:
		if (fence_reg >= 8)
			fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
		else
	case 2:
			fence_reg = FENCE_REG_830_0 + fence_reg * 4;

		I915_WRITE(fence_reg, 0);
		break;
	}

	list_del_init(&reg->lru_list);
	reg->obj = NULL;
	reg->setup_seqno = 0;
}

/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
			    unsigned alignment,
			    bool map_and_fenceable)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_mm_node *free_space;
	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
	u32 size, fence_size, fence_alignment, unfenced_alignment;
	bool mappable, fenceable;
	int ret;

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to bind a purgeable object\n");
		return -EINVAL;
	}

	fence_size = i915_gem_get_gtt_size(obj);
	fence_alignment = i915_gem_get_gtt_alignment(obj);
	unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);

	if (alignment == 0)
		alignment = map_and_fenceable ? fence_alignment :
						unfenced_alignment;
	if (map_and_fenceable && alignment & (fence_alignment - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

	size = map_and_fenceable ? fence_size : obj->base.size;

	/* If the object is bigger than the entire aperture, reject it early
	 * before evicting everything in a vain attempt to find space.
	 */
	if (obj->base.size >
	    (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
		DRM_ERROR("Attempting to bind an object larger than the aperture\n");
		return -E2BIG;
	}

 search_free:
	if (map_and_fenceable)
		free_space =
			drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
						    size, alignment, 0,
						    dev_priv->mm.gtt_mappable_end,
						    0);
	else
		free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
						size, alignment, 0);

	if (free_space != NULL) {
		if (map_and_fenceable)
			obj->gtt_space =
				drm_mm_get_block_range_generic(free_space,
							       size, alignment, 0,
							       dev_priv->mm.gtt_mappable_end,
							       0);
		else
			obj->gtt_space =
				drm_mm_get_block(free_space, size, alignment);
	}
	if (obj->gtt_space == NULL) {
		/* If the gtt is empty and we're still having trouble
		 * fitting our object in, we're out of memory.
		 */
		ret = i915_gem_evict_something(dev, size, alignment,
					       map_and_fenceable);
		if (ret)
			return ret;

		goto search_free;
	}

	ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
	if (ret) {
		drm_mm_put_block(obj->gtt_space);
		obj->gtt_space = NULL;

		if (ret == -ENOMEM) {
			/* first try to clear up some space from the GTT */
			ret = i915_gem_evict_something(dev, size,
						       alignment,
						       map_and_fenceable);
			if (ret) {
				/* now try to shrink everyone else */
				if (gfpmask) {
					gfpmask = 0;
					goto search_free;
				}

				return ret;
			}

			goto search_free;
		}

		return ret;
	}

	ret = i915_gem_gtt_bind_object(obj);
	if (ret) {
		i915_gem_object_put_pages_gtt(obj);
		drm_mm_put_block(obj->gtt_space);
		obj->gtt_space = NULL;

		ret = i915_gem_evict_something(dev, size,
					       alignment, map_and_fenceable);
		if (ret)
			return ret;

		goto search_free;
	}

	list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	obj->gtt_offset = obj->gtt_space->start;

	fenceable =
		obj->gtt_space->size == fence_size &&
		(obj->gtt_space->start & (fence_alignment - 1)) == 0;

	mappable =
		obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;

	obj->map_and_fenceable = mappable && fenceable;

	trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
	return 0;
}

void
i915_gem_clflush_object(struct drm_i915_gem_object *obj)
{
	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj->pages == NULL)
		return;

	trace_i915_gem_object_clflush(obj);

	drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
}

/** Flushes any GPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;

	if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
		return;

	/* Queue the GPU write cache flushing we need. */
	i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
	BUG_ON(obj->base.write_domain);
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain.   Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush.  It also doesn't land in render cache.
	 */
	i915_gem_release_mmap(obj);

	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		return;

	i915_gem_clflush_object(obj);
	intel_gtt_chipset_flush();
	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (obj->gtt_space == NULL)
		return -EINVAL;

	i915_gem_object_flush_gpu_write_domain(obj);
	if (obj->pending_gpu_write || write) {
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;
	}

	i915_gem_object_flush_cpu_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
		obj->dirty = 1;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}

/*
 * Prepare buffer for display plane. Use uninterruptible for possible flush
 * wait, as in the modesetting process we're not supposed to be interrupted.
 */
int
i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
				     struct intel_ring_buffer *pipelined)
{
	uint32_t old_read_domains;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (obj->gtt_space == NULL)
		return -EINVAL;

	i915_gem_object_flush_gpu_write_domain(obj);

	/* Currently, we are always called from a non-interruptible context. */
	if (!pipelined) {
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;
	}

	i915_gem_object_flush_cpu_write_domain(obj);

	old_read_domains = obj->base.read_domains;
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    obj->base.write_domain);

	return 0;
}

int
i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
			  bool interruptible)
{
	if (!obj->active)
		return 0;

	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
		i915_gem_flush_ring(obj->base.dev, obj->ring,
				    0, obj->base.write_domain);

	return i915_gem_object_wait_rendering(obj, interruptible);
}

/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	i915_gem_object_flush_gpu_write_domain(obj);
	ret = i915_gem_object_wait_rendering(obj, true);
	if (ret)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we have a partially-valid cache of the object in the CPU,
	 * finish invalidating it and free the per-page flags.
	 */
	i915_gem_object_set_to_full_cpu_read_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj);

		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}

/**
 * Moves the object from a partially CPU read to a full one.
 *
 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
 */
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
{
	if (!obj->page_cpu_valid)
		return;

	/* If we're partially in the CPU read domain, finish moving it in.
	 */
	if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
		int i;

		for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
			if (obj->page_cpu_valid[i])
				continue;
			drm_clflush_pages(obj->pages + i, 1);
		}
	}

	/* Free the page_cpu_valid mappings which are now stale, whether
	 * or not we've got I915_GEM_DOMAIN_CPU.
	 */
	kfree(obj->page_cpu_valid);
	obj->page_cpu_valid = NULL;
}

/**
 * Set the CPU read domain on a range of the object.
 *
 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
 * not entirely valid.  The page_cpu_valid member of the object flags which
 * pages have been flushed, and will be respected by
 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
 * of the whole object.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
					  uint64_t offset, uint64_t size)
{
	uint32_t old_read_domains;
	int i, ret;

	if (offset == 0 && size == obj->base.size)
		return i915_gem_object_set_to_cpu_domain(obj, 0);

	i915_gem_object_flush_gpu_write_domain(obj);
	ret = i915_gem_object_wait_rendering(obj, true);
	if (ret)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're already fully in the CPU read domain, we're done. */
	if (obj->page_cpu_valid == NULL &&
	    (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
		return 0;

	/* Otherwise, create/clear the per-page CPU read domain flag if we're
	 * newly adding I915_GEM_DOMAIN_CPU
	 */
	if (obj->page_cpu_valid == NULL) {
		obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
					      GFP_KERNEL);
		if (obj->page_cpu_valid == NULL)
			return -ENOMEM;
	} else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);

	/* Flush the cache on any pages that are still invalid from the CPU's
	 * perspective.
	 */
	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
	     i++) {
		if (obj->page_cpu_valid[i])
			continue;

		drm_clflush_pages(obj->pages + i, 1);

		obj->page_cpu_valid[i] = 1;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	old_read_domains = obj->base.read_domains;
	obj->base.read_domains |= I915_GEM_DOMAIN_CPU;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    obj->base.write_domain);

	return 0;
}

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
	struct drm_i915_gem_request *request;
	struct intel_ring_buffer *ring = NULL;
	u32 seqno = 0;
	int ret;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		ring = request->ring;
		seqno = request->seqno;
	}
	spin_unlock(&file_priv->mm.lock);

	if (seqno == 0)
		return 0;

	ret = 0;
	if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
		/* And wait for the seqno passing without holding any locks and
		 * causing extra latency for others. This is safe as the irq
		 * generation is designed to be run atomically and so is
		 * lockless.
		 */
		ring->irq_get(ring);
		ret = wait_event_interruptible(ring->irq_queue,
					       i915_seqno_passed(ring->get_seqno(ring), seqno)
					       || atomic_read(&dev_priv->mm.wedged));
		ring->irq_put(ring);

		if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
			ret = -EIO;
	}

	if (ret == 0)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);

	return ret;
}

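/*
 * Pin an object into the GTT: bind it (rebinding if the current placement
 * does not satisfy the requested alignment or mappability) and keep it off
 * the eviction lists while the pin count is non-zero.
 */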
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
		    uint32_t alignment,
		    bool map_and_fenceable)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
	WARN_ON(i915_verify_lists(dev));

	if (obj->gtt_space != NULL) {
		if ((alignment && obj->gtt_offset & (alignment - 1)) ||
		    (map_and_fenceable && !obj->map_and_fenceable)) {
			WARN(obj->pin_count,
			     "bo is already pinned with incorrect alignment:"
			     " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
			     " obj->map_and_fenceable=%d\n",
			     obj->gtt_offset, alignment,
			     map_and_fenceable,
			     obj->map_and_fenceable);
			ret = i915_gem_object_unbind(obj);
			if (ret)
				return ret;
		}
	}

	if (obj->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment,
						  map_and_fenceable);
		if (ret)
			return ret;
	}

	if (obj->pin_count++ == 0) {
		if (!obj->active)
			list_move_tail(&obj->mm_list,
				       &dev_priv->mm.pinned_list);
	}
	obj->pin_mappable |= map_and_fenceable;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	WARN_ON(i915_verify_lists(dev));
	BUG_ON(obj->pin_count == 0);
	BUG_ON(obj->gtt_space == NULL);

	if (--obj->pin_count == 0) {
		if (!obj->active)
			list_move_tail(&obj->mm_list,
				       &dev_priv->mm.inactive_list);
		obj->pin_mappable = false;
	}
	WARN_ON(i915_verify_lists(dev));
}

int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to pin a purgeable buffer\n");
		ret = -EINVAL;
		goto out;
	}

	if (obj->pin_filp != NULL && obj->pin_filp != file) {
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}

	obj->user_pin_count++;
	obj->pin_filp = file;
	if (obj->user_pin_count == 1) {
		ret = i915_gem_object_pin(obj, args->alignment, true);
		if (ret)
			goto out;
	}

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	i915_gem_object_flush_cpu_write_domain(obj);
	args->offset = obj->gtt_offset;
out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->pin_filp != file) {
		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}
	obj->user_pin_count--;
	if (obj->user_pin_count == 0) {
		obj->pin_filp = NULL;
		i915_gem_object_unpin(obj);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Count all active objects as busy, even if they are currently not used
	 * by the gpu. Users of this interface expect objects to eventually
	 * become non-busy without any further actions, therefore emit any
	 * necessary flushes here.
	 */
	args->busy = obj->active;
	if (args->busy) {
		/* Unconditionally flush objects, even when the gpu still uses this
		 * object. Userspace calling this function indicates that it wants to
		 * use this buffer rather sooner than later, so issuing the required
		 * flush earlier is beneficial.
		 */
		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
			i915_gem_flush_ring(dev, obj->ring,
					    0, obj->base.write_domain);

		/* Update the active list for the hardware's current position.
		 * Otherwise this only updates on a delayed timer or when irqs
		 * are actually unmasked, and our working set ends up being
		 * larger than required.
		 */
		i915_gem_retire_requests_ring(dev, obj->ring);

		args->busy = obj->active;
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
    return i915_gem_ring_throttle(dev, file_priv);
}

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
	    break;
	default:
	    return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->pin_count) {
		ret = -EINVAL;
		goto out;
	}

	if (obj->madv != __I915_MADV_PURGED)
		obj->madv = args->madv;

	/* if the object is no longer bound, discard its backing storage */
	if (i915_gem_object_is_purgeable(obj) &&
	    obj->gtt_space == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj->madv != __I915_MADV_PURGED;

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}

	i915_gem_info_add_obj(dev_priv, size);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->agp_type = AGP_USER_MEMORY;
	obj->base.driver_private = NULL;
	obj->fence_reg = I915_FENCE_REG_NONE;
	INIT_LIST_HEAD(&obj->mm_list);
	INIT_LIST_HEAD(&obj->gtt_list);
	INIT_LIST_HEAD(&obj->ring_list);
	INIT_LIST_HEAD(&obj->exec_list);
	INIT_LIST_HEAD(&obj->gpu_write_list);
	obj->madv = I915_MADV_WILLNEED;
	/* Avoid an unnecessary call to unbind on the first bind. */
	obj->map_and_fenceable = true;

	return obj;
}

int i915_gem_init_object(struct drm_gem_object *obj)
{
	BUG();

	return 0;
}

static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_object_unbind(obj);
	if (ret == -ERESTARTSYS) {
		list_move(&obj->mm_list,
			  &dev_priv->mm.deferred_free_list);
		return;
	}

	if (obj->base.map_list.map)
		i915_gem_free_mmap_offset(obj);

	drm_gem_object_release(&obj->base);
	i915_gem_info_remove_obj(dev_priv, obj->base.size);

	kfree(obj->page_cpu_valid);
	kfree(obj->bit_17);
	kfree(obj);
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_device *dev = obj->base.dev;

	trace_i915_gem_object_destroy(obj);

	while (obj->pin_count > 0)
		i915_gem_object_unpin(obj);

	if (obj->phys_obj)
		i915_gem_detach_phys_object(dev, obj);

	i915_gem_free_object_tail(obj);
}

int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = i915_gpu_idle(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Under UMS, be paranoid and evict. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_gem_evict_inactive(dev, false);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	i915_gem_reset_fences(dev);

	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 * And not confound mm.suspended!
	 */
	dev_priv->mm.suspended = 1;
	del_timer_sync(&dev_priv->hangcheck_timer);

	i915_kernel_lost_context(dev);
	i915_gem_cleanup_ringbuffer(dev);

	mutex_unlock(&dev->struct_mutex);

	/* Cancel the retire work handler, which should be idle now. */
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);

	return 0;
}

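/*
 * Bring up the render ring plus the BSD and BLT rings where the hardware
 * has them, tearing the already-initialised rings back down on failure.
 */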
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = intel_init_render_ring_buffer(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = intel_init_bsd_ring_buffer(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (HAS_BLT(dev)) {
		ret = intel_init_blt_ring_buffer(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	dev_priv->next_seqno = 1;

	return 0;

cleanup_bsd_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
	return ret;
}

void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
}

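/*
 * Re-enable GEM on VT switch-in under UMS: clear any wedged state,
 * reinitialise the rings and reinstall the IRQ handler.  KMS configurations
 * return immediately since the kernel owns the hardware throughout.
 */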
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		atomic_set(&dev_priv->mm.wedged, 0);
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->mm.suspended = 0;

	ret = i915_gem_init_ringbuffer(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	for (i = 0; i < I915_NUM_RINGS; i++) {
		BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
		BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
	}
	mutex_unlock(&dev->struct_mutex);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_ringbuffer;

	return 0;

cleanup_ringbuffer:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	dev_priv->mm.suspended = 1;
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	drm_irq_uninstall(dev);
	return i915_gem_idle(dev);
}

void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}

static void
init_ring_lists(struct intel_ring_buffer *ring)
{
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);
}

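/*
 * One-time GEM setup at driver load: initialise the memory-manager lists,
 * fence registers and retire worker, then register the inactive-list
 * shrinker with the VM.
 */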
3684 3685 3686
void
i915_gem_load(struct drm_device *dev)
{
3687
	int i;
3688 3689
	drm_i915_private_t *dev_priv = dev->dev_private;

3690
	INIT_LIST_HEAD(&dev_priv->mm.active_list);
3691 3692
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
C
Chris Wilson 已提交
3693
	INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
3694
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
3695
	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
D
Daniel Vetter 已提交
3696
	INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
3697 3698
	for (i = 0; i < I915_NUM_RINGS; i++)
		init_ring_lists(&dev_priv->ring[i]);
3699 3700
	for (i = 0; i < 16; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
3701 3702
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
3703
	init_completion(&dev_priv->error_completion);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	if (IS_GEN3(dev)) {
		u32 tmp = I915_READ(MI_ARB_STATE);
		if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
			/* arb state is a masked write, so set bit + bit in mask */
			tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
			I915_WRITE(MI_ARB_STATE, tmp);
		}
	}

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	switch (INTEL_INFO(dev)->gen) {
	case 6:
		for (i = 0; i < 16; i++)
			I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), 0);
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
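		/* fall through: fence regs 0-7 are cleared by the gen2 path below */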
	case 2:
		for (i = 0; i < 8; i++)
			I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
		break;
	}
	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&dev_priv->mm.inactive_shrinker);
}

/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
static int i915_gem_init_phys_object(struct drm_device *dev,
				     int id, int size, int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, align);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	kfree(phys_obj);
	return ret;
}

static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj)
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}

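/*
 * Copy the contents of the phys object back into the object's shmem pages,
 * flush them out of the CPU cache, and break the association.
 */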
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	char *vaddr;
	int i;
	int page_count;

	if (!obj->phys_obj)
		return;
	vaddr = obj->phys_obj->handle->vaddr;

	page_count = obj->base.size / PAGE_SIZE;
	for (i = 0; i < page_count; i++) {
		struct page *page = read_cache_page_gfp(mapping, i,
							GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (!IS_ERR(page)) {
			char *dst = kmap_atomic(page);
			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
			kunmap_atomic(dst);

			drm_clflush_pages(&page, 1);

			set_page_dirty(page);
			mark_page_accessed(page);
			page_cache_release(page);
		}
	}
	intel_gtt_chipset_flush();

	obj->phys_obj->cur_obj = NULL;
	obj->phys_obj = NULL;
}

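/*
 * Bind an object to a physically contiguous buffer (creating the phys
 * object if needed) and copy the object's current contents into it.
 */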
int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_i915_gem_object *obj,
			    int id,
			    int align)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	if (obj->phys_obj) {
		if (obj->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->base.size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n",
				  id, obj->base.size);
			return ret;
		}
	}

	/* bind to the object */
	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj->phys_obj->cur_obj = obj;

	page_count = obj->base.size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		struct page *page;
		char *dst, *src;

		page = read_cache_page_gfp(mapping, i,
					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src);

		mark_page_accessed(page);
		page_cache_release(page);
	}

	return 0;
}

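/*
 * pwrite path for objects backed by a phys object: copy user data straight
 * into the contiguous buffer, dropping struct_mutex for a sleeping copy if
 * the atomic copy faults.
 */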
static int
i915_gem_phys_pwrite(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
	char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;

	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten)
			return -EFAULT;
	}

	intel_gtt_chipset_flush();
	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

static int
i915_gpu_is_active(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int lists_empty;

	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
		      list_empty(&dev_priv->mm.active_list);

	return !lists_empty;
}

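/*
 * Shrinker callback.  With nr_to_scan == 0, report how many inactive objects
 * could be freed.  Otherwise unbind purgeable objects first, then any other
 * inactive objects, and as a last resort wait for the GPU to idle and rescan.
 */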
static int
i915_gem_inactive_shrink(struct shrinker *shrinker,
			 int nr_to_scan,
			 gfp_t gfp_mask)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker,
			     struct drm_i915_private,
			     mm.inactive_shrinker);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj, *next;
	int cnt;

	if (!mutex_trylock(&dev->struct_mutex))
		return 0;

	/* "fast-path" to count number of available objects */
	if (nr_to_scan == 0) {
		cnt = 0;
		list_for_each_entry(obj,
				    &dev_priv->mm.inactive_list,
				    mm_list)
			cnt++;
		mutex_unlock(&dev->struct_mutex);
		return cnt / 100 * sysctl_vfs_cache_pressure;
	}

rescan:
	/* first scan for clean buffers */
	i915_gem_retire_requests(dev);

	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.inactive_list,
				 mm_list) {
		if (i915_gem_object_is_purgeable(obj)) {
			if (i915_gem_object_unbind(obj) == 0 &&
			    --nr_to_scan == 0)
				break;
		}
	}

	/* second pass, evict/count anything still on the inactive list */
	cnt = 0;
	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.inactive_list,
				 mm_list) {
		if (nr_to_scan &&
		    i915_gem_object_unbind(obj) == 0)
			nr_to_scan--;
		else
			cnt++;
	}

	if (nr_to_scan && i915_gpu_is_active(dev)) {
		/*
		 * We are desperate for pages, so as a last resort, wait
		 * for the GPU to finish and discard whatever we can.
		 * This dramatically reduces the number of OOM-killer
		 * events whilst running the GPU aggressively.
		 */
		if (i915_gpu_idle(dev) == 0)
			goto rescan;
	}
	mutex_unlock(&dev->struct_mutex);
	return cnt / 100 * sysctl_vfs_cache_pressure;
}