/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>

static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
							  bool write);
static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
								  uint64_t offset,
								  uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
						    unsigned alignment,
						    bool map_and_fenceable);
static void i915_gem_clear_fence_reg(struct drm_device *dev,
				     struct drm_i915_fence_reg *reg);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);
static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
				    struct shrink_control *sc);

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
}

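/* If a GPU hang is pending, wait for the error handler to finish resetting
 * the chip before allowing any further GEM activity.
 */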
static int
i915_gem_wait_for_error(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct completion *x = &dev_priv->error_completion;
	unsigned long flags;
	int ret;

	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	ret = wait_for_completion_interruptible(x);
	if (ret)
		return ret;

	if (atomic_read(&dev_priv->mm.wedged)) {
		/* GPU is hung, bump the completion count to account for
		 * the token we just consumed so that we never hit zero and
		 * end up waiting upon a subsequent completion event that
		 * will never happen.
		 */
		spin_lock_irqsave(&x->wait.lock, flags);
		x->done++;
		spin_unlock_irqrestore(&x->wait.lock, flags);
	}
	return 0;
}

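/* Take struct_mutex for GEM work, first bailing out if a GPU reset is still
 * outstanding.  Returns 0 on success or a negative error code if interrupted.
 */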
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	int ret;

	ret = i915_gem_wait_for_error(dev);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

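/* An object is "inactive" when it is bound into the GTT but is neither in use
 * by the GPU nor pinned.
 */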
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return obj->gtt_space && !obj->active && obj->pin_count == 0;
}

void i915_gem_do_init(struct drm_device *dev,
		      unsigned long start,
		      unsigned long mappable_end,
		      unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);

	dev_priv->mm.gtt_start = start;
	dev_priv->mm.gtt_mappable_end = mappable_end;
	dev_priv->mm.gtt_end = end;
	dev_priv->mm.gtt_total = end - start;
	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;

	/* Take over this portion of the GTT */
	intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_init *args = data;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);
	i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
		pinned += obj->gtt_space->size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->mm.gtt_total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

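/* Common allocation path shared by the create and dumb_create ioctls: round
 * the size up to a whole page, allocate the object and return a new handle.
 */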
static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
		kfree(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference(&obj->base);
	trace_i915_gem_object_create(obj);

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

int i915_gem_dumb_destroy(struct drm_file *file,
			  struct drm_device *dev,
			  uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

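/* Tiled objects on boards that swizzle on physical address bit 17 need the
 * CPU to swizzle the page contents manually during pread/pwrite.
 */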
static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

static inline void
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap(dst_page);
	src_vaddr = kmap(src_page);

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap(src_page);
	kunmap(dst_page);
}

static inline void
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap(gpu_page);
	cpu_vaddr = kmap(cpu_page);

	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}
		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap(cpu_page);
	kunmap(gpu_page);
}

/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space.  On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev,
			  struct drm_i915_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = args->offset;

	while (remain > 0) {
		struct page *page;
		char *vaddr;
		int ret;

		/* Operation in this page
		 *
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap_atomic(page);
		ret = __copy_to_user_inatomic(user_data,
					      vaddr + page_offset,
					      page_length);
		kunmap_atomic(vaddr);

		mark_page_accessed(page);
		page_cache_release(page);
		if (ret)
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

/**
 * This is the fallback shmem pread path, which allocates temporary storage
 * in kernel space to copy_to_user into outside of the struct_mutex, so we
 * can copy out of the object's backing pages while holding the struct mutex
 * and not take page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev,
			  struct drm_i915_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_set_cpu_read_domain_range(obj,
							args->offset,
							args->size);
	if (ret)
		goto out;

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	offset = args->offset;

	while (remain > 0) {
		struct page *page;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = offset_in_page(data_ptr);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(page,
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      1);
		} else {
			slow_shmem_copy(user_pages[data_page_index],
					data_page_offset,
					page,
					shmem_page_offset,
					page_length);
		}

		mark_page_accessed(page);
		page_cache_release(page);

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

out:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		mark_page_accessed(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	drm_free_large(user_pages);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
				       args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source.  */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_object_set_cpu_read_domain_range(obj,
							args->offset,
							args->size);
	if (ret)
		goto out;

	ret = -EFAULT;
	if (!i915_gem_object_needs_bit17_swizzle(obj))
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
	if (ret == -EFAULT)
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline void
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char __iomem *dst_vaddr;
	char *src_vaddr;

	dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
	src_vaddr = kmap(user_page);

	memcpy_toio(dst_vaddr + gtt_offset,
		    src_vaddr + user_offset,
		    length);

	kunmap(user_page);
	io_mapping_unmap(dst_vaddr);
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = obj->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				    page_offset, user_data, page_length))
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin_pages;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin_pages;

	offset = obj->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset_in_page(offset);
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = offset_in_page(data_ptr);

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		slow_kernel_write(dev_priv->mm.gtt_mapping,
				  gtt_page_base, gtt_page_offset,
				  user_pages[data_page_index],
				  data_page_offset,
				  page_length);

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int page_offset, page_length;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = args->offset;
	obj->dirty = 1;

	while (remain > 0) {
		struct page *page;
		char *vaddr;
		int ret;

		/* Operation in this page
		 *
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		vaddr = kmap_atomic(page, KM_USER0);
		ret = __copy_from_user_inatomic(vaddr + page_offset,
						user_data,
						page_length);
		kunmap_atomic(vaddr, KM_USER0);

		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			return -EFAULT;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	return 0;
}

/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out;
	}

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret)
		goto out;

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	offset = args->offset;
	obj->dirty = 1;

	while (remain > 0) {
		struct page *page;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = offset_in_page(data_ptr);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}

		if (do_bit17_swizzling) {
			slow_shmem_bit17_copy(page,
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length,
					      0);
		} else {
			slow_shmem_copy(page,
					shmem_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);
		}

		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

out:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
				      args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
	else if (obj->gtt_space &&
		 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_object_pin(obj, 0, true);
		if (ret)
			goto out;

		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			goto out_unpin;

		ret = i915_gem_object_put_fence(obj);
		if (ret)
			goto out_unpin;

		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		if (ret == -EFAULT)
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);

out_unpin:
		i915_gem_object_unpin(obj);
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
		if (ret)
			goto out;

		ret = -EFAULT;
		if (!i915_gem_object_needs_bit17_swizzle(obj))
			ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
		if (ret == -EFAULT)
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	if (obj->size > dev_priv->mm.gtt_mappable_end) {
		drm_gem_object_unreference_unlocked(obj);
		return -E2BIG;
	}

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Now bind it into the GTT if needed */
	if (!obj->map_and_fenceable) {
		ret = i915_gem_object_unbind(obj);
		if (ret)
			goto unlock;
	}
	if (!obj->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, 0, true);
		if (ret)
			goto unlock;

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret)
			goto unlock;
	}

	if (obj->tiling_mode == I915_TILING_NONE)
		ret = i915_gem_object_put_fence(obj);
	else
		ret = i915_gem_object_get_fence(obj, NULL);
	if (ret)
		goto unlock;

	if (i915_gem_object_is_inactive(obj))
		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	obj->fault_mappable = true;

	pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EIO:
	case -EAGAIN:
		/* Give the error handler a chance to run and move the
		 * objects off the GPU active list. Next time we service the
		 * fault, we should be able to transition the page into the
		 * GTT without touching the GPU (and so avoid further
		 * EIO/EAGAIN). If the GPU is wedged, then there is no issue
		 * with coherency, just lost writes.
		 */
		set_need_resched();
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->base.map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->base.size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->base.size / PAGE_SIZE,
						    0, 0);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n",
			  obj->base.name);
		ret = -ENOSPC;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->base.size / PAGE_SIZE,
						  0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);
	list->map = NULL;

	return ret;
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	if (!obj->fault_mappable)
		return;

	if (obj->base.dev->dev_mapping)
		unmap_mapping_range(obj->base.dev->dev_mapping,
				    (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
				    obj->base.size, 1);

	obj->fault_mappable = false;
}

static void
i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list = &obj->base.map_list;

	drm_ht_remove_item(&mm->offset_hash, &list->hash);
	drm_mm_put_block(list->file_offset_node);
	kfree(list->map);
	list->map = NULL;
}

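/* Size of the GTT range (and fence) needed for this object: the object size
 * on gen4+ or for untiled objects, otherwise the smallest power-of-two fence
 * region that covers it.
 */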
static uint32_t
i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t size;

	if (INTEL_INFO(dev)->gen >= 4 ||
	    obj->tiling_mode == I915_TILING_NONE)
		return obj->base.size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (INTEL_INFO(dev)->gen == 3)
		size = 1024*1024;
	else
		size = 512*1024;

	while (size < obj->base.size)
		size <<= 1;

	return size;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_INFO(dev)->gen >= 4 ||
	    obj->tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_gtt_size(obj);
}

/**
 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
 *					 unfenced object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, only taking into account
 * unfenced tiled surface requirements.
 */
uint32_t
i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	int tile_height;

	/*
	 * Minimum alignment is 4k (GTT page size) for sane hw.
	 */
	if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
	    obj->tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Older chips need unfenced tiled buffers to be aligned to the left
	 * edge of an even tile row (where tile rows are counted as if the bo is
	 * placed in a fenced gtt region).
	 */
	if (IS_GEN2(dev))
		tile_height = 16;
	else if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
		tile_height = 32;
	else
		tile_height = 8;

	return tile_height * obj->stride * 2;
}

int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
		ret = -E2BIG;
		goto unlock;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
		ret = -EINVAL;
		goto out;
	}

	if (!obj->base.map_list.map) {
		ret = i915_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	*offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}


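/* Pin all of the object's backing shmem pages into obj->pages, applying the
 * bit-17 swizzle fixups if the object needs them.
 */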
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
			      gfp_t gfpmask)
{
	int page_count, i;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->base.size / PAGE_SIZE;
	BUG_ON(obj->pages != NULL);
	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
	if (obj->pages == NULL)
		return -ENOMEM;

	inode = obj->base.filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	for (i = 0; i < page_count; i++) {
		page = read_cache_page_gfp(mapping, i,
					   GFP_HIGHUSER |
					   __GFP_COLD |
					   __GFP_RECLAIMABLE |
					   gfpmask);
		if (IS_ERR(page))
			goto err_pages;

		obj->pages[i] = page;
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		i915_gem_object_do_bit_17_swizzle(obj);

	return 0;

err_pages:
	while (i--)
		page_cache_release(obj->pages[i]);

	drm_free_large(obj->pages);
	obj->pages = NULL;
	return PTR_ERR(page);
}

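/* Drop the backing pages again, writing back any CPU dirt unless the object
 * has been marked DONTNEED, in which case the pages are simply discarded.
 */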
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int i;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	if (obj->tiling_mode != I915_TILING_NONE)
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	for (i = 0; i < page_count; i++) {
		if (obj->dirty)
			set_page_dirty(obj->pages[i]);

		if (obj->madv == I915_MADV_WILLNEED)
			mark_page_accessed(obj->pages[i]);

		page_cache_release(obj->pages[i]);
	}
	obj->dirty = 0;

	drm_free_large(obj->pages);
	obj->pages = NULL;
}

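/* Mark the object as busy on the given ring: take a reference the first time
 * it becomes active, move it onto the active lists and record the seqno (and
 * any fence) that must retire before it is idle again.
 */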
void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
			       struct intel_ring_buffer *ring,
			       u32 seqno)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	BUG_ON(ring == NULL);
	obj->ring = ring;

	/* Add a reference if we're newly entering the active list. */
	if (!obj->active) {
		drm_gem_object_reference(&obj->base);
		obj->active = 1;
	}

	/* Move from whatever list we were on to the tail of execution. */
	list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
	list_move_tail(&obj->ring_list, &ring->active_list);

	obj->last_rendering_seqno = seqno;
	if (obj->fenced_gpu_access) {
		struct drm_i915_fence_reg *reg;

		BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);

		obj->last_fenced_seqno = seqno;
		obj->last_fenced_ring = ring;

		reg = &dev_priv->fence_regs[obj->fence_reg];
		list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
	}
}

static void
i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
{
	list_del_init(&obj->ring_list);
	obj->last_rendering_seqno = 0;
}

static void
i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	BUG_ON(!obj->active);
	list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);

	i915_gem_object_move_off_active(obj);
}

static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (obj->pin_count != 0)
		list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
	else
		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	BUG_ON(!list_empty(&obj->gpu_write_list));
	BUG_ON(!obj->active);
	obj->ring = NULL;

	i915_gem_object_move_off_active(obj);
	obj->fenced_gpu_access = false;

	obj->active = 0;
	obj->pending_gpu_write = false;
	drm_gem_object_unreference(&obj->base);

	WARN_ON(i915_verify_lists(dev));
}

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	struct inode *inode;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*. Here we mirror the actions taken
	 * by shmem_delete_inode() to release the backing store.
	 */
	inode = obj->base.filp->f_path.dentry->d_inode;
	truncate_inode_pages(inode->i_mapping, 0);
	if (inode->i_op->truncate_range)
		inode->i_op->truncate_range(inode, 0, (loff_t)-1);

	obj->madv = __I915_MADV_PURGED;
}

static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{
	return obj->madv == I915_MADV_DONTNEED;
}

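/* After a flush, every object with a pending GPU write in the flushed domains
 * is moved onto the active list of the ring that emitted the flush, tagged
 * with the upcoming request's seqno.
 */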
static void
i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
			       uint32_t flush_domains)
{
	struct drm_i915_gem_object *obj, *next;

	list_for_each_entry_safe(obj, next,
				 &ring->gpu_write_list,
				 gpu_write_list) {
		if (obj->base.write_domain & flush_domains) {
			uint32_t old_write_domain = obj->base.write_domain;

			obj->base.write_domain = 0;
			list_del_init(&obj->gpu_write_list);
			i915_gem_object_move_to_active(obj, ring,
						       i915_gem_next_request_seqno(ring));

			trace_i915_gem_object_change_domain(obj,
							    obj->base.read_domains,
							    old_write_domain);
		}
	}
}

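/* Emit a request (breadcrumb) on the ring and queue it for retirement.  Also
 * re-arms the hangcheck timer and, if the ring was idle, the retire_work
 * handler.
 */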
int
i915_add_request(struct intel_ring_buffer *ring,
		 struct drm_file *file,
		 struct drm_i915_gem_request *request)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	uint32_t seqno;
	int was_empty;
	int ret;

	BUG_ON(request == NULL);

	ret = ring->add_request(ring, &seqno);
	if (ret)
		return ret;

	trace_i915_gem_request_add(ring, seqno);

	request->seqno = seqno;
	request->ring = ring;
	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&ring->request_list);
	list_add_tail(&request->list, &ring->request_list);

	if (file) {
		struct drm_i915_file_private *file_priv = file->driver_priv;

		spin_lock(&file_priv->mm.lock);
		request->file_priv = file_priv;
		list_add_tail(&request->client_list,
			      &file_priv->mm.request_list);
		spin_unlock(&file_priv->mm.lock);
	}

	ring->outstanding_lazy_request = false;

	if (!dev_priv->mm.suspended) {
		mod_timer(&dev_priv->hangcheck_timer,
			  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
		if (was_empty)
			queue_delayed_work(dev_priv->wq,
					   &dev_priv->mm.retire_work, HZ);
	}
	return 0;
}

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	if (request->file_priv) {
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

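/* Throw away all outstanding requests on a ring after a GPU reset and move
 * any objects still on its active list back to inactive.
 */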
static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
				      struct intel_ring_buffer *ring)
{
	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		list_del(&request->list);
		i915_gem_request_remove_from_client(request);
		kfree(request);
	}

	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&ring->active_list,
				       struct drm_i915_gem_object,
				       ring_list);

		obj->base.write_domain = 0;
		list_del_init(&obj->gpu_write_list);
		i915_gem_object_move_to_inactive(obj);
	}
}

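/* After a reset the fence registers no longer reflect reality: detach every
 * object from its fence and clear the registers themselves.
 */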
static void i915_gem_reset_fences(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < 16; i++) {
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
		struct drm_i915_gem_object *obj = reg->obj;

		if (!obj)
			continue;

		if (obj->tiling_mode)
			i915_gem_release_mmap(obj);

1850 1851 1852 1853 1854
		reg->obj->fence_reg = I915_FENCE_REG_NONE;
		reg->obj->fenced_gpu_access = false;
		reg->obj->last_fenced_seqno = 0;
		reg->obj->last_fenced_ring = NULL;
		i915_gem_clear_fence_reg(dev, reg);
1855 1856 1857
	}
}

1858
void i915_gem_reset(struct drm_device *dev)
1859
{
1860
	struct drm_i915_private *dev_priv = dev->dev_private;
1861
	struct drm_i915_gem_object *obj;
1862
	int i;
1863

1864 1865
	for (i = 0; i < I915_NUM_RINGS; i++)
		i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
1866 1867 1868 1869 1870 1871

	/* Remove anything from the flushing lists. The GPU cache is likely
	 * to be lost on reset along with the data, so simply move the
	 * lost bo to the inactive list.
	 */
	while (!list_empty(&dev_priv->mm.flushing_list)) {
1872 1873 1874
		obj= list_first_entry(&dev_priv->mm.flushing_list,
				      struct drm_i915_gem_object,
				      mm_list);
1875

1876 1877 1878
		obj->base.write_domain = 0;
		list_del_init(&obj->gpu_write_list);
		i915_gem_object_move_to_inactive(obj);
1879 1880 1881 1882 1883
	}

	/* Move everything out of the GPU domains to ensure we do any
	 * necessary invalidation upon reuse.
	 */
1884
	list_for_each_entry(obj,
1885
			    &dev_priv->mm.inactive_list,
1886
			    mm_list)
1887
	{
1888
		obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1889
	}
1890 1891

	/* The fence registers are invalidated so clear them out */
1892
	i915_gem_reset_fences(dev);
1893 1894 1895 1896 1897
}

/**
 * This function clears the request list as sequence numbers are passed.
 */
1898
static void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
	uint32_t seqno;
	int i;

	if (list_empty(&ring->request_list))
		return;

	WARN_ON(i915_verify_lists(ring->dev));

	seqno = ring->get_seqno(ring);

	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
		if (seqno >= ring->sync_seqno[i])
			ring->sync_seqno[i] = 0;

	while (!list_empty(&ring->request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&ring->request_list,
					   struct drm_i915_gem_request,
					   list);

		if (!i915_seqno_passed(seqno, request->seqno))
			break;

		trace_i915_gem_request_retire(ring, request->seqno);

		list_del(&request->list);
		i915_gem_request_remove_from_client(request);
		kfree(request);
	}

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&ring->active_list,
				      struct drm_i915_gem_object,
				      ring_list);

		if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
			break;

		if (obj->base.write_domain != 0)
			i915_gem_object_move_to_flushing(obj);
		else
			i915_gem_object_move_to_inactive(obj);
	}

	if (unlikely(ring->trace_irq_seqno &&
		     i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
		ring->irq_put(ring);
		ring->trace_irq_seqno = 0;
	}

	WARN_ON(i915_verify_lists(ring->dev));
}

void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	if (!list_empty(&dev_priv->mm.deferred_free_list)) {
	    struct drm_i915_gem_object *obj, *next;

	    /* We must be careful that during unbind() we do not
	     * accidentally infinitely recurse into retire requests.
	     * Currently:
	     *   retire -> free -> unbind -> wait -> retire_ring
	     */
	    list_for_each_entry_safe(obj, next,
				     &dev_priv->mm.deferred_free_list,
				     mm_list)
		    i915_gem_free_object_tail(obj);
	}

	for (i = 0; i < I915_NUM_RINGS; i++)
		i915_gem_retire_requests_ring(&dev_priv->ring[i]);
}

static void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;
	bool idle;
	int i;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	/* Come back later if the device is busy... */
	if (!mutex_trylock(&dev->struct_mutex)) {
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
		return;
	}

	i915_gem_retire_requests(dev);

	/* Send a periodic flush down the ring so we don't hold onto GEM
	 * objects indefinitely.
	 */
	idle = true;
	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_ring_buffer *ring = &dev_priv->ring[i];

		if (!list_empty(&ring->gpu_write_list)) {
			struct drm_i915_gem_request *request;
			int ret;

			ret = i915_gem_flush_ring(ring,
						  0, I915_GEM_GPU_DOMAINS);
			request = kzalloc(sizeof(*request), GFP_KERNEL);
			if (ret || request == NULL ||
			    i915_add_request(ring, NULL, request))
			    kfree(request);
		}

		idle &= list_empty(&ring->request_list);
	}

	if (!dev_priv->mm.suspended && !idle)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);

	mutex_unlock(&dev->struct_mutex);
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_request(struct intel_ring_buffer *ring,
		  uint32_t seqno)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 ier;
	int ret = 0;

	BUG_ON(seqno == 0);

	if (atomic_read(&dev_priv->mm.wedged)) {
		struct completion *x = &dev_priv->error_completion;
		bool recovery_complete;
		unsigned long flags;

		/* Give the error handler a chance to run. */
		spin_lock_irqsave(&x->wait.lock, flags);
		recovery_complete = x->done > 0;
		spin_unlock_irqrestore(&x->wait.lock, flags);

		return recovery_complete ? -EIO : -EAGAIN;
	}

	if (seqno == ring->outstanding_lazy_request) {
		struct drm_i915_gem_request *request;

		request = kzalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return -ENOMEM;

		ret = i915_add_request(ring, NULL, request);
		if (ret) {
			kfree(request);
			return ret;
		}

		seqno = request->seqno;
	}

	if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
		if (HAS_PCH_SPLIT(ring->dev))
			ier = I915_READ(DEIER) | I915_READ(GTIER);
		else
			ier = I915_READ(IER);
		if (!ier) {
			DRM_ERROR("something (likely vbetool) disabled "
				  "interrupts, re-enabling\n");
			i915_driver_irq_preinstall(ring->dev);
			i915_driver_irq_postinstall(ring->dev);
		}

		trace_i915_gem_request_wait_begin(ring, seqno);

		ring->waiting_seqno = seqno;
		if (ring->irq_get(ring)) {
			if (dev_priv->mm.interruptible)
				ret = wait_event_interruptible(ring->irq_queue,
							       i915_seqno_passed(ring->get_seqno(ring), seqno)
							       || atomic_read(&dev_priv->mm.wedged));
			else
				wait_event(ring->irq_queue,
					   i915_seqno_passed(ring->get_seqno(ring), seqno)
					   || atomic_read(&dev_priv->mm.wedged));

			ring->irq_put(ring);
		} else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
						      seqno) ||
				    atomic_read(&dev_priv->mm.wedged), 3000))
			ret = -EBUSY;
		ring->waiting_seqno = 0;

		trace_i915_gem_request_wait_end(ring, seqno);
	}
	if (atomic_read(&dev_priv->mm.wedged))
		ret = -EAGAIN;

	if (ret && ret != -ERESTARTSYS)
		DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
			  __func__, ret, seqno, ring->get_seqno(ring),
			  dev_priv->next_seqno);

	/* Directly dispatch request retiring.  While we have the work queue
	 * to handle this, the waiter on a request often wants an associated
	 * buffer to have made it to the inactive list, and we would need
	 * a separate wait queue to handle that.
	 */
	if (ret == 0)
		i915_gem_retire_requests_ring(ring);

	return ret;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
{
	int ret;

	/* This function only exists to support waiting for existing rendering,
	 * not for emitting required flushes.
	 */
	BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);

	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
	if (obj->active) {
		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * Unbinds an object from the GTT aperture.
 */
int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	int ret = 0;

	if (obj->gtt_space == NULL)
		return 0;

	if (obj->pin_count != 0) {
		DRM_ERROR("Attempting to unbind pinned buffer\n");
		return -EINVAL;
	}

	/* blow away mappings if mapped through GTT */
	i915_gem_release_mmap(obj);

	/* Move the object to the CPU domain to ensure that
	 * any possible CPU writes while it's not in the GTT
	 * are flushed when we go to remap it. This will
	 * also ensure that all pending GPU writes are finished
	 * before we unbind.
	 */
	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret == -ERESTARTSYS)
		return ret;
	/* Continue on if we fail due to EIO, the GPU is hung so we
	 * should be safe and we need to cleanup or else we might
	 * cause memory corruption through use-after-free.
	 */
	if (ret) {
		i915_gem_clflush_object(obj);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	/* release the fence reg _after_ flushing */
	ret = i915_gem_object_put_fence(obj);
	if (ret == -ERESTARTSYS)
		return ret;

	trace_i915_gem_object_unbind(obj);

	i915_gem_gtt_unbind_object(obj);
	i915_gem_object_put_pages_gtt(obj);

	list_del_init(&obj->gtt_list);
	list_del_init(&obj->mm_list);
	/* Avoid an unnecessary call to unbind on rebind. */
	obj->map_and_fenceable = true;

	drm_mm_put_block(obj->gtt_space);
	obj->gtt_space = NULL;
	obj->gtt_offset = 0;

	if (i915_gem_object_is_purgeable(obj))
		i915_gem_object_truncate(obj);

	return ret;
}

int
i915_gem_flush_ring(struct intel_ring_buffer *ring,
		    uint32_t invalidate_domains,
		    uint32_t flush_domains)
{
	int ret;

	if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
		return 0;

	trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);

	ret = ring->flush(ring, invalidate_domains, flush_domains);
	if (ret)
		return ret;

	if (flush_domains & I915_GEM_GPU_DOMAINS)
		i915_gem_process_flushing_list(ring, flush_domains);

	return 0;
}

static int i915_ring_idle(struct intel_ring_buffer *ring)
{
	int ret;

	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
		return 0;

	if (!list_empty(&ring->gpu_write_list)) {
		ret = i915_gem_flush_ring(ring,
				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
		if (ret)
			return ret;
	}

	return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
}

int
i915_gpu_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool lists_empty;
	int ret, i;

	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->mm.active_list));
	if (lists_empty)
		return 0;

	/* Flush everything onto the inactive list. */
	for (i = 0; i < I915_NUM_RINGS; i++) {
		ret = i915_ring_idle(&dev_priv->ring[i]);
		if (ret)
			return ret;
	}

	return 0;
}

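/* The helpers below encode an object's size, pitch and tiling mode into a
 * single fence register, either directly via MMIO or, when @pipelined is
 * non-NULL, by emitting MI_LOAD_REGISTER_IMM on that ring.
 */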
static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
				       struct intel_ring_buffer *pipelined)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 size = obj->gtt_space->size;
	int regnum = obj->fence_reg;
	uint64_t val;

	val = (uint64_t)((obj->gtt_offset + size - 4096) &
			 0xfffff000) << 32;
	val |= obj->gtt_offset & 0xfffff000;
	val |= (uint64_t)((obj->stride / 128) - 1) <<
		SANDYBRIDGE_FENCE_PITCH_SHIFT;

	if (obj->tiling_mode == I915_TILING_Y)
		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
	val |= I965_FENCE_REG_VALID;

	if (pipelined) {
		int ret = intel_ring_begin(pipelined, 6);
		if (ret)
			return ret;

		intel_ring_emit(pipelined, MI_NOOP);
		intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
		intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
		intel_ring_emit(pipelined, (u32)val);
		intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
		intel_ring_emit(pipelined, (u32)(val >> 32));
		intel_ring_advance(pipelined);
	} else
		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);

	return 0;
}

static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
				struct intel_ring_buffer *pipelined)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 size = obj->gtt_space->size;
	int regnum = obj->fence_reg;
	uint64_t val;

	val = (uint64_t)((obj->gtt_offset + size - 4096) &
		    0xfffff000) << 32;
	val |= obj->gtt_offset & 0xfffff000;
	val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
	if (obj->tiling_mode == I915_TILING_Y)
		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
	val |= I965_FENCE_REG_VALID;

	if (pipelined) {
		int ret = intel_ring_begin(pipelined, 6);
		if (ret)
			return ret;

		intel_ring_emit(pipelined, MI_NOOP);
		intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
		intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
		intel_ring_emit(pipelined, (u32)val);
		intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
		intel_ring_emit(pipelined, (u32)(val >> 32));
		intel_ring_advance(pipelined);
	} else
		I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);

	return 0;
}

static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
				struct intel_ring_buffer *pipelined)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 size = obj->gtt_space->size;
	u32 fence_reg, val, pitch_val;
	int tile_width;

	if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
		 (size & -size) != size ||
		 (obj->gtt_offset & (size - 1)),
		 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
		 obj->gtt_offset, obj->map_and_fenceable, size))
		return -EINVAL;

	if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
		tile_width = 128;
	else
		tile_width = 512;

	/* Note: pitch better be a power of two tile widths */
	pitch_val = obj->stride / tile_width;
	pitch_val = ffs(pitch_val) - 1;

	val = obj->gtt_offset;
	if (obj->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	val |= I915_FENCE_SIZE_BITS(size);
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	fence_reg = obj->fence_reg;
	if (fence_reg < 8)
		fence_reg = FENCE_REG_830_0 + fence_reg * 4;
	else
		fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;

	if (pipelined) {
		int ret = intel_ring_begin(pipelined, 4);
		if (ret)
			return ret;

		intel_ring_emit(pipelined, MI_NOOP);
		intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(pipelined, fence_reg);
		intel_ring_emit(pipelined, val);
		intel_ring_advance(pipelined);
	} else
		I915_WRITE(fence_reg, val);

	return 0;
}

static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
				struct intel_ring_buffer *pipelined)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 size = obj->gtt_space->size;
	int regnum = obj->fence_reg;
	uint32_t val;
	uint32_t pitch_val;

	if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
		 (size & -size) != size ||
		 (obj->gtt_offset & (size - 1)),
		 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
		 obj->gtt_offset, size))
		return -EINVAL;

	pitch_val = obj->stride / 128;
	pitch_val = ffs(pitch_val) - 1;

	val = obj->gtt_offset;
	if (obj->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	val |= I830_FENCE_SIZE_BITS(size);
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	if (pipelined) {
		int ret = intel_ring_begin(pipelined, 4);
		if (ret)
			return ret;

		intel_ring_emit(pipelined, MI_NOOP);
		intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
		intel_ring_emit(pipelined, val);
		intel_ring_advance(pipelined);
	} else
		I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);

	return 0;
}

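/* Fence management helpers: before a fence register is reassigned or cleared,
 * any outstanding fenced GPU access to the object is flushed and, if needed,
 * waited upon so the old register value is no longer referenced.
 */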
static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
	return i915_seqno_passed(ring->get_seqno(ring), seqno);
}

static int
i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
			    struct intel_ring_buffer *pipelined)
{
	int ret;

	if (obj->fenced_gpu_access) {
		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
			ret = i915_gem_flush_ring(obj->last_fenced_ring,
						  0, obj->base.write_domain);
			if (ret)
				return ret;
		}

		obj->fenced_gpu_access = false;
	}

	if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
		if (!ring_passed_seqno(obj->last_fenced_ring,
				       obj->last_fenced_seqno)) {
			ret = i915_wait_request(obj->last_fenced_ring,
						obj->last_fenced_seqno);
			if (ret)
				return ret;
		}

		obj->last_fenced_seqno = 0;
		obj->last_fenced_ring = NULL;
	}

	/* Ensure that all CPU reads are completed before installing a fence
	 * and all writes before removing the fence.
	 */
	if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
		mb();

	return 0;
}

int
i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
{
	int ret;

	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	ret = i915_gem_object_flush_fence(obj, NULL);
	if (ret)
		return ret;

	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		i915_gem_clear_fence_reg(obj->base.dev,
					 &dev_priv->fence_regs[obj->fence_reg]);

		obj->fence_reg = I915_FENCE_REG_NONE;
	}

	return 0;
}

static struct drm_i915_fence_reg *
i915_find_fence_reg(struct drm_device *dev,
		    struct intel_ring_buffer *pipelined)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_fence_reg *reg, *first, *avail;
	int i;

	/* First try to find a free reg */
	avail = NULL;
	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
		reg = &dev_priv->fence_regs[i];
		if (!reg->obj)
			return reg;

		if (!reg->obj->pin_count)
			avail = reg;
	}

	if (avail == NULL)
		return NULL;

	/* None available, try to steal one or wait for a user to finish */
	avail = first = NULL;
	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
		if (reg->obj->pin_count)
			continue;

		if (first == NULL)
			first = reg;

		if (!pipelined ||
		    !reg->obj->last_fenced_ring ||
		    reg->obj->last_fenced_ring == pipelined) {
			avail = reg;
			break;
		}
	}

	if (avail == NULL)
		avail = first;

	return avail;
}

/**
 * i915_gem_object_get_fence - set up a fence reg for an object
 * @obj: object to map through a fence reg
 * @pipelined: ring on which to queue the change, or NULL for CPU access
 * @interruptible: must we wait uninterruptibly for the register to retire?
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 *
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 */
int
i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
			  struct intel_ring_buffer *pipelined)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_fence_reg *reg;
	int ret;

	/* XXX disable pipelining. There are bugs. Shocking. */
	pipelined = NULL;

	/* Just update our place in the LRU if our fence is getting reused. */
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		reg = &dev_priv->fence_regs[obj->fence_reg];
		list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);

		if (obj->tiling_changed) {
			ret = i915_gem_object_flush_fence(obj, pipelined);
			if (ret)
				return ret;

			if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
				pipelined = NULL;

			if (pipelined) {
				reg->setup_seqno =
					i915_gem_next_request_seqno(pipelined);
				obj->last_fenced_seqno = reg->setup_seqno;
				obj->last_fenced_ring = pipelined;
			}

			goto update;
		}

		if (!pipelined) {
			if (reg->setup_seqno) {
				if (!ring_passed_seqno(obj->last_fenced_ring,
						       reg->setup_seqno)) {
					ret = i915_wait_request(obj->last_fenced_ring,
								reg->setup_seqno);
					if (ret)
						return ret;
				}

				reg->setup_seqno = 0;
			}
		} else if (obj->last_fenced_ring &&
			   obj->last_fenced_ring != pipelined) {
			ret = i915_gem_object_flush_fence(obj, pipelined);
			if (ret)
				return ret;
		}

		return 0;
	}

	reg = i915_find_fence_reg(dev, pipelined);
	if (reg == NULL)
		return -ENOSPC;

	ret = i915_gem_object_flush_fence(obj, pipelined);
	if (ret)
		return ret;

	if (reg->obj) {
		struct drm_i915_gem_object *old = reg->obj;

		drm_gem_object_reference(&old->base);

		if (old->tiling_mode)
			i915_gem_release_mmap(old);

		ret = i915_gem_object_flush_fence(old, pipelined);
		if (ret) {
			drm_gem_object_unreference(&old->base);
			return ret;
		}

		if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
			pipelined = NULL;

		old->fence_reg = I915_FENCE_REG_NONE;
		old->last_fenced_ring = pipelined;
		old->last_fenced_seqno =
			pipelined ? i915_gem_next_request_seqno(pipelined) : 0;

		drm_gem_object_unreference(&old->base);
	} else if (obj->last_fenced_seqno == 0)
		pipelined = NULL;

	reg->obj = obj;
	list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
	obj->fence_reg = reg - dev_priv->fence_regs;
	obj->last_fenced_ring = pipelined;

	reg->setup_seqno =
		pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
	obj->last_fenced_seqno = reg->setup_seqno;

update:
	obj->tiling_changed = false;
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		ret = sandybridge_write_fence_reg(obj, pipelined);
		break;
	case 5:
	case 4:
		ret = i965_write_fence_reg(obj, pipelined);
		break;
	case 3:
		ret = i915_write_fence_reg(obj, pipelined);
		break;
	case 2:
		ret = i830_write_fence_reg(obj, pipelined);
		break;
	}

	return ret;
}

/**
 * i915_gem_clear_fence_reg - clear out fence register info
 * @obj: object to clear
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj.
 */
static void
i915_gem_clear_fence_reg(struct drm_device *dev,
			 struct drm_i915_fence_reg *reg)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t fence_reg = reg - dev_priv->fence_regs;

	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
		break;
	case 5:
	case 4:
		I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
		break;
	case 3:
		if (fence_reg >= 8)
			fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
		else
	case 2:
			fence_reg = FENCE_REG_830_0 + fence_reg * 4;

		I915_WRITE(fence_reg, 0);
		break;
	}

	list_del_init(&reg->lru_list);
	reg->obj = NULL;
	reg->setup_seqno = 0;
}

/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
			    unsigned alignment,
			    bool map_and_fenceable)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_mm_node *free_space;
	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
	u32 size, fence_size, fence_alignment, unfenced_alignment;
	bool mappable, fenceable;
	int ret;

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to bind a purgeable object\n");
		return -EINVAL;
	}

	fence_size = i915_gem_get_gtt_size(obj);
	fence_alignment = i915_gem_get_gtt_alignment(obj);
	unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);

	if (alignment == 0)
		alignment = map_and_fenceable ? fence_alignment :
						unfenced_alignment;
	if (map_and_fenceable && alignment & (fence_alignment - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

	size = map_and_fenceable ? fence_size : obj->base.size;

	/* If the object is bigger than the entire aperture, reject it early
	 * before evicting everything in a vain attempt to find space.
	 */
	if (obj->base.size >
	    (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
		DRM_ERROR("Attempting to bind an object larger than the aperture\n");
		return -E2BIG;
	}

 search_free:
	if (map_and_fenceable)
		free_space =
			drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
						    size, alignment, 0,
						    dev_priv->mm.gtt_mappable_end,
						    0);
	else
		free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
						size, alignment, 0);

	if (free_space != NULL) {
		if (map_and_fenceable)
			obj->gtt_space =
				drm_mm_get_block_range_generic(free_space,
							       size, alignment, 0,
							       dev_priv->mm.gtt_mappable_end,
							       0);
		else
			obj->gtt_space =
				drm_mm_get_block(free_space, size, alignment);
	}
	if (obj->gtt_space == NULL) {
		/* If the gtt is empty and we're still having trouble
		 * fitting our object in, we're out of memory.
		 */
		ret = i915_gem_evict_something(dev, size, alignment,
					       map_and_fenceable);
		if (ret)
			return ret;

		goto search_free;
	}

	ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
	if (ret) {
		drm_mm_put_block(obj->gtt_space);
		obj->gtt_space = NULL;

		if (ret == -ENOMEM) {
			/* first try to reclaim some memory by clearing the GTT */
			ret = i915_gem_evict_everything(dev, false);
			if (ret) {
				/* now try to shrink everyone else */
				if (gfpmask) {
					gfpmask = 0;
					goto search_free;
				}

				return -ENOMEM;
			}

			goto search_free;
		}

		return ret;
	}

	ret = i915_gem_gtt_bind_object(obj);
	if (ret) {
		i915_gem_object_put_pages_gtt(obj);
		drm_mm_put_block(obj->gtt_space);
		obj->gtt_space = NULL;

		if (i915_gem_evict_everything(dev, false))
			return ret;

		goto search_free;
	}

	list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	obj->gtt_offset = obj->gtt_space->start;

	fenceable =
		obj->gtt_space->size == fence_size &&
		(obj->gtt_space->start & (fence_alignment - 1)) == 0;

	mappable =
		obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;

	obj->map_and_fenceable = mappable && fenceable;

	trace_i915_gem_object_bind(obj, map_and_fenceable);
	return 0;
}

void
i915_gem_clflush_object(struct drm_i915_gem_object *obj)
{
	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj->pages == NULL)
		return;

	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines.  However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated.  As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (obj->cache_level != I915_CACHE_NONE)
		return;

	trace_i915_gem_object_clflush(obj);

	drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
}

/** Flushes any GPU write domain for the object if it's dirty. */
static int
i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
{
	if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
		return 0;

	/* Queue the GPU write cache flushing we need. */
	return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain.  Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush.  It also doesn't land in render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 */
	wmb();

	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
	uint32_t old_write_domain;

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		return;

	i915_gem_clflush_object(obj);
	intel_gtt_chipset_flush();
	old_write_domain = obj->base.write_domain;
	obj->base.write_domain = 0;

	trace_i915_gem_object_change_domain(obj,
					    obj->base.read_domains,
					    old_write_domain);
}

/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (obj->gtt_space == NULL)
		return -EINVAL;

	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
		return 0;

	ret = i915_gem_object_flush_gpu_write_domain(obj);
	if (ret)
		return ret;

	if (obj->pending_gpu_write || write) {
		ret = i915_gem_object_wait_rendering(obj);
		if (ret)
			return ret;
	}

	i915_gem_object_flush_cpu_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
		obj->dirty = 1;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}

/*
 * Prepare buffer for display plane. Use uninterruptible for possible flush
 * wait, as in modesetting process we're not supposed to be interrupted.
 */
int
i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
				     struct intel_ring_buffer *pipelined)
{
	uint32_t old_read_domains;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (obj->gtt_space == NULL)
		return -EINVAL;

	ret = i915_gem_object_flush_gpu_write_domain(obj);
	if (ret)
		return ret;


	/* Currently, we are always called from an non-interruptible context. */
	if (pipelined != obj->ring) {
		ret = i915_gem_object_wait_rendering(obj);
		if (ret)
			return ret;
	}

	i915_gem_object_flush_cpu_write_domain(obj);

	old_read_domains = obj->base.read_domains;
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    obj->base.write_domain);

	return 0;
}

int
i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
{
	int ret;

	if (!obj->active)
		return 0;

	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
		ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
		if (ret)
			return ret;
	}

	return i915_gem_object_wait_rendering(obj);
}

/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return 0;

	ret = i915_gem_object_flush_gpu_write_domain(obj);
	if (ret)
		return ret;

	ret = i915_gem_object_wait_rendering(obj);
	if (ret)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we have a partially-valid cache of the object in the CPU,
	 * finish invalidating it and free the per-page flags.
	 */
	i915_gem_object_set_to_full_cpu_read_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj);

		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}

/**
 * Moves the object from a partially CPU read to a full one.
 *
 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
 */
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
{
	if (!obj->page_cpu_valid)
		return;

	/* If we're partially in the CPU read domain, finish moving it in.
	 */
	if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
		int i;

		for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
			if (obj->page_cpu_valid[i])
				continue;
			drm_clflush_pages(obj->pages + i, 1);
		}
	}

	/* Free the page_cpu_valid mappings which are now stale, whether
	 * or not we've got I915_GEM_DOMAIN_CPU.
	 */
	kfree(obj->page_cpu_valid);
	obj->page_cpu_valid = NULL;
}

/**
 * Set the CPU read domain on a range of the object.
 *
 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
 * not entirely valid.  The page_cpu_valid member of the object flags which
 * pages have been flushed, and will be respected by
 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
 * of the whole object.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
					  uint64_t offset, uint64_t size)
{
	uint32_t old_read_domains;
	int i, ret;

	if (offset == 0 && size == obj->base.size)
		return i915_gem_object_set_to_cpu_domain(obj, 0);

	ret = i915_gem_object_flush_gpu_write_domain(obj);
	if (ret)
		return ret;

	ret = i915_gem_object_wait_rendering(obj);
	if (ret)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're already fully in the CPU read domain, we're done. */
	if (obj->page_cpu_valid == NULL &&
	    (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
		return 0;

	/* Otherwise, create/clear the per-page CPU read domain flag if we're
	 * newly adding I915_GEM_DOMAIN_CPU
	 */
	if (obj->page_cpu_valid == NULL) {
		obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
					      GFP_KERNEL);
		if (obj->page_cpu_valid == NULL)
			return -ENOMEM;
	} else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);

	/* Flush the cache on any pages that are still invalid from the CPU's
	 * perspective.
	 */
	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
	     i++) {
		if (obj->page_cpu_valid[i])
			continue;

		drm_clflush_pages(obj->pages + i, 1);

		obj->page_cpu_valid[i] = 1;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	old_read_domains = obj->base.read_domains;
	obj->base.read_domains |= I915_GEM_DOMAIN_CPU;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    obj->base.write_domain);

	return 0;
}

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
	struct drm_i915_gem_request *request;
	struct intel_ring_buffer *ring = NULL;
	u32 seqno = 0;
	int ret;

	if (atomic_read(&dev_priv->mm.wedged))
		return -EIO;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		ring = request->ring;
		seqno = request->seqno;
	}
	spin_unlock(&file_priv->mm.lock);

	if (seqno == 0)
		return 0;

	ret = 0;
	if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
		/* And wait for the seqno passing without holding any locks and
		 * causing extra latency for others. This is safe as the irq
		 * generation is designed to be run atomically and so is
		 * lockless.
		 */
		if (ring->irq_get(ring)) {
			ret = wait_event_interruptible(ring->irq_queue,
						       i915_seqno_passed(ring->get_seqno(ring), seqno)
						       || atomic_read(&dev_priv->mm.wedged));
			ring->irq_put(ring);

			if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
				ret = -EIO;
		}
	}

	if (ret == 0)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);

	return ret;
}

int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
		    uint32_t alignment,
		    bool map_and_fenceable)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
	WARN_ON(i915_verify_lists(dev));

	if (obj->gtt_space != NULL) {
		if ((alignment && obj->gtt_offset & (alignment - 1)) ||
		    (map_and_fenceable && !obj->map_and_fenceable)) {
			WARN(obj->pin_count,
			     "bo is already pinned with incorrect alignment:"
			     " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
			     " obj->map_and_fenceable=%d\n",
			     obj->gtt_offset, alignment,
			     map_and_fenceable,
			     obj->map_and_fenceable);
			ret = i915_gem_object_unbind(obj);
			if (ret)
				return ret;
		}
	}

	if (obj->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment,
						  map_and_fenceable);
		if (ret)
			return ret;
	}

	if (obj->pin_count++ == 0) {
		if (!obj->active)
			list_move_tail(&obj->mm_list,
				       &dev_priv->mm.pinned_list);
	}
	obj->pin_mappable |= map_and_fenceable;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	WARN_ON(i915_verify_lists(dev));
	BUG_ON(obj->pin_count == 0);
	BUG_ON(obj->gtt_space == NULL);

	if (--obj->pin_count == 0) {
		if (!obj->active)
			list_move_tail(&obj->mm_list,
				       &dev_priv->mm.inactive_list);
		obj->pin_mappable = false;
	}
	WARN_ON(i915_verify_lists(dev));
}

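/* The pin/unpin ioctls let userspace (the X server) pin an object into the
 * GTT and learn its offset; the kernel tracks the user pin count and the
 * owning file.
 */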
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to pin a purgeable buffer\n");
		ret = -EINVAL;
		goto out;
	}

	if (obj->pin_filp != NULL && obj->pin_filp != file) {
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}

	obj->user_pin_count++;
	obj->pin_filp = file;
	if (obj->user_pin_count == 1) {
		ret = i915_gem_object_pin(obj, args->alignment, true);
		if (ret)
			goto out;
	}

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	i915_gem_object_flush_cpu_write_domain(obj);
	args->offset = obj->gtt_offset;
out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->pin_filp != file) {
		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		ret = -EINVAL;
		goto out;
	}
	obj->user_pin_count--;
	if (obj->user_pin_count == 0) {
		obj->pin_filp = NULL;
		i915_gem_object_unpin(obj);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Count all active objects as busy, even if they are currently not used
	 * by the gpu. Users of this interface expect objects to eventually
	 * become non-busy without any further actions, therefore emit any
	 * necessary flushes here.
	 */
	args->busy = obj->active;
	if (args->busy) {
		/* Unconditionally flush objects, even when the gpu still uses this
		 * object. Userspace calling this function indicates that it wants to
		 * use this buffer rather sooner than later, so issuing the required
		 * flush earlier is beneficial.
		 */
		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
			ret = i915_gem_flush_ring(obj->ring,
						  0, obj->base.write_domain);
		} else if (obj->ring->outstanding_lazy_request ==
			   obj->last_rendering_seqno) {
			struct drm_i915_gem_request *request;

			/* This ring is not being cleared by active usage,
			 * so emit a request to do so.
			 */
			request = kzalloc(sizeof(*request), GFP_KERNEL);
			if (request)
				ret = i915_add_request(obj->ring, NULL, request);
			else
				ret = -ENOMEM;
		}

		/* Update the active list for the hardware's current position.
		 * Otherwise this only updates on a delayed timer or when irqs
		 * are actually unmasked, and our working set ends up being
		 * larger than required.
		 */
		i915_gem_retire_requests_ring(obj->ring);

		args->busy = obj->active;
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->pin_count) {
		ret = -EINVAL;
		goto out;
	}

	if (obj->madv != __I915_MADV_PURGED)
		obj->madv = args->madv;

	/* if the object is no longer bound, discard its backing storage */
	if (i915_gem_object_is_purgeable(obj) &&
	    obj->gtt_space == NULL)
		i915_gem_object_truncate(obj);

	args->retained = obj->madv != __I915_MADV_PURGED;

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

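/* Allocate a new GEM object of @size bytes; new objects start out unbound,
 * in the CPU domain, and marked map_and_fenceable to avoid a spurious unbind
 * on the first bind.
 */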
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size)
3565
{
3566
	struct drm_i915_private *dev_priv = dev->dev_private;
3567
	struct drm_i915_gem_object *obj;
3568

3569 3570 3571
	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;
3572

3573 3574 3575 3576
	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}
3577

3578 3579
	i915_gem_info_add_obj(dev_priv, size);

3580 3581
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3582

3583
	obj->cache_level = I915_CACHE_NONE;
3584
	obj->base.driver_private = NULL;
3585
	obj->fence_reg = I915_FENCE_REG_NONE;
3586
	INIT_LIST_HEAD(&obj->mm_list);
D
Daniel Vetter 已提交
3587
	INIT_LIST_HEAD(&obj->gtt_list);
3588
	INIT_LIST_HEAD(&obj->ring_list);
3589
	INIT_LIST_HEAD(&obj->exec_list);
3590 3591
	INIT_LIST_HEAD(&obj->gpu_write_list);
	obj->madv = I915_MADV_WILLNEED;
3592 3593
	/* Avoid an unnecessary call to unbind on the first bind. */
	obj->map_and_fenceable = true;
3594

3595
	return obj;
3596 3597 3598 3599 3600
}

int i915_gem_init_object(struct drm_gem_object *obj)
{
	BUG();
3601

3602 3603 3604
	return 0;
}

3605
static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
3606
{
3607
	struct drm_device *dev = obj->base.dev;
3608 3609
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
3610

3611 3612
	ret = i915_gem_object_unbind(obj);
	if (ret == -ERESTARTSYS) {
3613
		list_move(&obj->mm_list,
3614 3615 3616
			  &dev_priv->mm.deferred_free_list);
		return;
	}
3617

3618 3619
	trace_i915_gem_object_destroy(obj);

3620
	if (obj->base.map_list.map)
3621
		i915_gem_free_mmap_offset(obj);
3622

3623 3624
	drm_gem_object_release(&obj->base);
	i915_gem_info_remove_obj(dev_priv, obj->base.size);
3625

3626 3627 3628
	kfree(obj->page_cpu_valid);
	kfree(obj->bit_17);
	kfree(obj);
3629 3630
}

3631
void i915_gem_free_object(struct drm_gem_object *gem_obj)
3632
{
3633 3634
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_device *dev = obj->base.dev;
3635

3636
	while (obj->pin_count > 0)
3637 3638
		i915_gem_object_unpin(obj);

3639
	if (obj->phys_obj)
3640 3641 3642 3643 3644
		i915_gem_detach_phys_object(dev, obj);

	i915_gem_free_object_tail(obj);
}

3645 3646 3647 3648 3649
int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = i915_gpu_idle(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Under UMS, be paranoid and evict. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_gem_evict_inactive(dev, false);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	i915_gem_reset_fences(dev);

	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 * And not confound mm.suspended!
	 */
	dev_priv->mm.suspended = 1;
	del_timer_sync(&dev_priv->hangcheck_timer);

	i915_kernel_lost_context(dev);
	i915_gem_cleanup_ringbuffer(dev);

	mutex_unlock(&dev->struct_mutex);

	/* Cancel the retire work handler, which should be idle now. */
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);

	return 0;
}

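/* Initialise the render ring and, where the hardware provides them,
 * the BSD and BLT rings, unwinding already-initialised rings on
 * failure.
 */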
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = intel_init_render_ring_buffer(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = intel_init_bsd_ring_buffer(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (HAS_BLT(dev)) {
		ret = intel_init_blt_ring_buffer(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	dev_priv->next_seqno = 1;

	return 0;

cleanup_bsd_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
	return ret;
}

void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
}

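/* Legacy (UMS) VT-enter ioctl: clear any wedged state, bring the rings
 * back up and install the IRQ handler.  A no-op under KMS.
 */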
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		atomic_set(&dev_priv->mm.wedged, 0);
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->mm.suspended = 0;

	ret = i915_gem_init_ringbuffer(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	for (i = 0; i < I915_NUM_RINGS; i++) {
		BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
		BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
	}
	mutex_unlock(&dev->struct_mutex);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_ringbuffer;

	return 0;

cleanup_ringbuffer:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	dev_priv->mm.suspended = 1;
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

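/* Legacy (UMS) VT-leave ioctl: remove the IRQ handler and idle the
 * GPU.  A no-op under KMS.
 */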
int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	drm_irq_uninstall(dev);
	return i915_gem_idle(dev);
}

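/* Called on last close of the DRM device node; under UMS, idle the
 * hardware so it is left quiescent when userspace goes away.
 */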
void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}

static void
init_ring_lists(struct intel_ring_buffer *ring)
{
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);
}

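/* One-time GEM setup at driver load: initialise the memory-management
 * lists, the retire work handler and the shrinker, apply the GEN3
 * ARB_STATE workaround and size the fence register file for this
 * hardware generation.
 */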
void
i915_gem_load(struct drm_device *dev)
{
	int i;
	drm_i915_private_t *dev_priv = dev->dev_private;

	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
	INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
	for (i = 0; i < I915_NUM_RINGS; i++)
		init_ring_lists(&dev_priv->ring[i]);
	for (i = 0; i < 16; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	init_completion(&dev_priv->error_completion);

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	if (IS_GEN3(dev)) {
		u32 tmp = I915_READ(MI_ARB_STATE);
		if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
			/* arb state is a masked write, so set bit + bit in mask */
			tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
			I915_WRITE(MI_ARB_STATE, tmp);
		}
	}

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		dev_priv->fence_reg_start = 3;

	if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
	}

	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&dev_priv->mm.inactive_shrinker);
}

/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
static int i915_gem_init_phys_object(struct drm_device *dev,
				     int id, int size, int align)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, align);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	kfree(phys_obj);
	return ret;
}

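/* Release one physically contiguous backing object, detaching any GEM
 * object still bound to it and freeing the PCI allocation.
 */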
static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj) {
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
	}

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}

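/* Detach an object from its physically contiguous backing store,
 * copying the contents back into the shmem pages and marking them
 * dirty so nothing is lost.
 */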
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	char *vaddr;
	int i;
	int page_count;

	if (!obj->phys_obj)
		return;
	vaddr = obj->phys_obj->handle->vaddr;

	page_count = obj->base.size / PAGE_SIZE;
	for (i = 0; i < page_count; i++) {
		struct page *page = read_cache_page_gfp(mapping, i,
							GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (!IS_ERR(page)) {
			char *dst = kmap_atomic(page);
			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
			kunmap_atomic(dst);

			drm_clflush_pages(&page, 1);

			set_page_dirty(page);
			mark_page_accessed(page);
			page_cache_release(page);
		}
	}
	intel_gtt_chipset_flush();

	obj->phys_obj->cur_obj = NULL;
	obj->phys_obj = NULL;
}

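/* Attach an object to a physically contiguous backing store (e.g. for
 * cursor and overlay data), creating the phys object on first use and
 * copying the current shmem contents into it.
 */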
int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_i915_gem_object *obj,
			    int id,
			    int align)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	if (obj->phys_obj) {
		if (obj->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->base.size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n",
				  id, obj->base.size);
			return ret;
		}
	}

	/* bind to the object */
	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj->phys_obj->cur_obj = obj;

	page_count = obj->base.size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		struct page *page;
		char *dst, *src;

		page = read_cache_page_gfp(mapping, i,
					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src);

		mark_page_accessed(page);
		page_cache_release(page);
	}

	return 0;
}

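/* pwrite fast path for phys objects: copy user data straight into the
 * physically contiguous mapping, falling back to a faulting copy
 * outside struct_mutex if the atomic copy fails.
 */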
static int
i915_gem_phys_pwrite(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
	char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;

	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten)
			return -EFAULT;
	}

	intel_gtt_chipset_flush();
	return 0;
}

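/* DRM file-close hook: unlink any requests still owned by this client
 * so later request retirement does not dereference the freed
 * file_priv.
 */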
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

static int
i915_gpu_is_active(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int lists_empty;

	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
		      list_empty(&dev_priv->mm.active_list);

	return !lists_empty;
}

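/* Shrinker callback invoked under memory pressure.  With nr_to_scan
 * == 0 it merely counts reclaimable inactive objects; otherwise it
 * unbinds purgeable buffers first, then other inactive buffers, and
 * idles the GPU as a last resort before rescanning.
 */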
static int
i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker,
			     struct drm_i915_private,
			     mm.inactive_shrinker);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj, *next;
	int nr_to_scan = sc->nr_to_scan;
	int cnt;

	if (!mutex_trylock(&dev->struct_mutex))
		return 0;

	/* "fast-path" to count number of available objects */
	if (nr_to_scan == 0) {
		cnt = 0;
		list_for_each_entry(obj,
				    &dev_priv->mm.inactive_list,
				    mm_list)
			cnt++;
		mutex_unlock(&dev->struct_mutex);
		return cnt / 100 * sysctl_vfs_cache_pressure;
	}

rescan:
	/* first scan for clean buffers */
	i915_gem_retire_requests(dev);

	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.inactive_list,
				 mm_list) {
		if (i915_gem_object_is_purgeable(obj)) {
			if (i915_gem_object_unbind(obj) == 0 &&
			    --nr_to_scan == 0)
				break;
		}
	}

	/* second pass, evict/count anything still on the inactive list */
	cnt = 0;
	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.inactive_list,
				 mm_list) {
		if (nr_to_scan &&
		    i915_gem_object_unbind(obj) == 0)
			nr_to_scan--;
		else
			cnt++;
	}

	if (nr_to_scan && i915_gpu_is_active(dev)) {
		/*
		 * We are desperate for pages, so as a last resort, wait
		 * for the GPU to finish and discard whatever we can.
		 * This has a dramatic impact to reduce the number of
		 * OOM-killer events whilst running the GPU aggressively.
		 */
		if (i915_gpu_idle(dev) == 0)
			goto rescan;
	}
	mutex_unlock(&dev->struct_mutex);
	return cnt / 100 * sysctl_vfs_cache_pressure;
}