/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

struct change_domains {
	uint32_t invalidate_domains;
	uint32_t flush_domains;
	uint32_t flush_rings;
	uint32_t flips;
};

/*
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invalidating though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Mapped to GTT
 *	4. Read by GPU
 *	5. Unmapped from GTT
 *	6. Freed
 *
 *	Let's take these a step at a time
 *
 *	1. Allocated
 *		Pages allocated from the kernel may still have
 *		cache contents, so we set them to (CPU, CPU) always.
 *	2. Written by CPU (using pwrite)
 *		The pwrite function calls set_domain (CPU, CPU) and
 *		this function does nothing (as nothing changes)
 *	3. Mapped by GTT
 *		This function asserts that the object is not
 *		currently in any GPU-based read or write domains
 *	4. Read by GPU
 *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *		As write_domain is zero, this function adds in the
 *		current read domains (CPU+COMMAND, 0).
 *		flush_domains is set to CPU.
 *		invalidate_domains is set to COMMAND
 *		clflush is run to get data out of the CPU caches
 *		then i915_dev_set_domain calls i915_gem_flush to
 *		emit an MI_FLUSH and drm_agp_chipset_flush
 *	5. Unmapped from GTT
 *		i915_gem_object_unbind calls set_domain (CPU, CPU)
 *		flush_domains and invalidate_domains end up both zero
 *		so no flushing/invalidating happens
 *	6. Freed
 *		yay, done
 *
 * Case 2: The shared render buffer
 *
 *	1. Allocated
 *	2. Mapped to GTT
 *	3. Read/written by GPU
 *	4. set_domain to (CPU,CPU)
 *	5. Read/written by CPU
 *	6. Read/written by GPU
 *
 *	1. Allocated
 *		Same as last example, (CPU, CPU)
 *	2. Mapped to GTT
 *		Nothing changes (assertions find that it is not in the GPU)
 *	3. Read/written by GPU
 *		execbuffer calls set_domain (RENDER, RENDER)
 *		flush_domains gets CPU
 *		invalidate_domains gets GPU
 *		clflush (obj)
 *		MI_FLUSH and drm_agp_chipset_flush
 *	4. set_domain (CPU, CPU)
 *		flush_domains gets GPU
 *		invalidate_domains gets CPU
 *		wait_rendering (obj) to make sure all drawing is complete.
 *		This will include an MI_FLUSH to get the data from GPU
 *		to memory
 *		clflush (obj) to invalidate the CPU cache
 *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *	5. Read/written by CPU
 *		cache lines are loaded and dirtied
 *	6. Read/written by GPU
 *		Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Read by GPU
 *	4. Updated (written) by CPU again
 *	5. Read by GPU
 *
 *	1. Allocated
 *		(CPU, CPU)
 *	2. Written by CPU
 *		(CPU, CPU)
 *	3. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 *	4. Updated (written) by CPU again
 *		(CPU, CPU)
 *		flush_domains = 0 (no previous write domain)
 *		invalidate_domains = 0 (no new read domains)
 *	5. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 */
static void
i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
				  struct intel_ring_buffer *ring,
				  struct change_domains *cd)
{
	uint32_t invalidate_domains = 0, flush_domains = 0;

	/*
	 * If the object isn't moving to a new write domain,
	 * let the object stay in multiple read domains
	 */
	if (obj->base.pending_write_domain == 0)
		obj->base.pending_read_domains |= obj->base.read_domains;

	/*
	 * Flush the current write domain if
	 * the new read domains don't match. Invalidate
	 * any read domains which differ from the old
	 * write domain
	 */
	if (obj->base.write_domain &&
	    (((obj->base.write_domain != obj->base.pending_read_domains ||
	       obj->ring != ring)) ||
	     (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
		flush_domains |= obj->base.write_domain;
		invalidate_domains |=
			obj->base.pending_read_domains & ~obj->base.write_domain;
	}
	/*
	 * Invalidate any read caches which may have
	 * stale data. That is, any new read domains.
	 */
	invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
		i915_gem_clflush_object(obj);

	/* blow away mappings if mapped through GTT */
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
		i915_gem_release_mmap(obj);

	if (obj->base.pending_write_domain)
		cd->flips |= atomic_read(&obj->pending_flip);

	/* The actual obj->write_domain will be updated with
	 * pending_write_domain after we emit the accumulated flush for all
	 * of our domain changes in execbuffers (which clears objects'
	 * write_domains).  So if we have a current write domain that we
	 * aren't changing, set pending_write_domain to that.
	 */
	if (flush_domains == 0 && obj->base.pending_write_domain == 0)
		obj->base.pending_write_domain = obj->base.write_domain;

	cd->invalidate_domains |= invalidate_domains;
	cd->flush_domains |= flush_domains;
	if (flush_domains & I915_GEM_GPU_DOMAINS)
		cd->flush_rings |= obj->ring->id;
	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
		cd->flush_rings |= ring->id;
}

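/*
 * eb_objects is a small handle->object hash table used to look up the
 * objects referenced by each relocation. The bucket count is a power of
 * two sized to fit within half a page, so a handle is hashed simply by
 * masking it with eb->and.
 */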
struct eb_objects {
	int and;
	struct hlist_head buckets[0];
};

static struct eb_objects *
eb_create(int size)
{
	struct eb_objects *eb;
	int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
	while (count > size)
		count >>= 1;
	eb = kzalloc(count*sizeof(struct hlist_head) +
		     sizeof(struct eb_objects),
		     GFP_KERNEL);
	if (eb == NULL)
		return eb;

	eb->and = count - 1;
	return eb;
}

static void
eb_reset(struct eb_objects *eb)
{
	memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static void
eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
{
	hlist_add_head(&obj->exec_node,
		       &eb->buckets[obj->exec_handle & eb->and]);
}

static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct drm_i915_gem_object *obj;

	head = &eb->buckets[handle & eb->and];
	hlist_for_each(node, head) {
		obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
		if (obj->exec_handle == handle)
			return obj;
	}

	return NULL;
}

static void
eb_destroy(struct eb_objects *eb)
{
	kfree(eb);
}

static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_objects *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	uint32_t target_offset;
	int ret = -EINVAL;

	/* we already hold a reference to all valid objects */
	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
	if (unlikely(target_obj == NULL))
		return -ENOENT;

	target_offset = to_intel_bo(target_obj)->gtt_offset;

	/* The target buffer should have appeared before us in the
	 * exec_object list, so it should have a GTT space bound by now.
	 */
	if (unlikely(target_offset == 0)) {
		DRM_ERROR("No GTT space found for object %d\n",
			  reloc->target_handle);
		return ret;
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_ERROR("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
		DRM_ERROR("reloc with read/write CPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
		     reloc->write_domain != target_obj->pending_write_domain)) {
		DRM_ERROR("Write domain conflict: "
			  "obj %p target %d offset %d "
			  "new %08x old %08x\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->write_domain,
			  target_obj->pending_write_domain);
		return ret;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset > obj->base.size - 4)) {
		DRM_ERROR("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return ret;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_ERROR("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return ret;
	}

	reloc->delta += target_offset;
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
		uint32_t page_offset = reloc->offset & ~PAGE_MASK;
		char *vaddr;

		vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
		kunmap_atomic(vaddr);
	} else {
		struct drm_i915_private *dev_priv = dev->dev_private;
		uint32_t __iomem *reloc_entry;
		void __iomem *reloc_page;

		/* We can't wait for rendering with pagefaults disabled */
		if (obj->active && in_atomic())
			return -EFAULT;

		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
		if (ret)
			return ret;

		/* Map the page containing the relocation we're going to perform.  */
		reloc->offset += obj->gtt_offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      reloc->offset & PAGE_MASK);
		reloc_entry = (uint32_t __iomem *)
			(reloc_page + (reloc->offset & ~PAGE_MASK));
		iowrite32(reloc->delta, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);
	}

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
				    struct eb_objects *eb)
{
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int i, ret;

	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
	for (i = 0; i < entry->relocation_count; i++) {
		struct drm_i915_gem_relocation_entry reloc;

		if (__copy_from_user_inatomic(&reloc,
					      user_relocs+i,
					      sizeof(reloc)))
			return -EFAULT;

		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
		if (ret)
			return ret;

		if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
					    &reloc.presumed_offset,
					    sizeof(reloc.presumed_offset)))
			return -EFAULT;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
					 struct eb_objects *eb,
					 struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct drm_device *dev,
			     struct eb_objects *eb,
			     struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmapped bo. In such a case the page fault
	 * handler would call i915_gem_fault() and we would try to acquire
	 * the struct mutex again. Obviously this is bad and so lockdep
	 * complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_execbuffer_relocate_object(obj, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			    struct drm_file *file,
			    struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	int ret, retry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	struct list_head ordered_objects;

	INIT_LIST_HEAD(&ordered_objects);
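	/* Build the pin order: objects that must live in the mappable
	 * aperture (those needing a fence, or with relocations left to
	 * patch) are moved to the front of the list so that they are
	 * bound first.
	 */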
	while (!list_empty(objects)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		obj = list_first_entry(objects,
				       struct drm_i915_gem_object,
				       exec_list);
		entry = obj->exec_entry;

		need_fence =
			has_fenced_gpu_access &&
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable =
			entry->relocation_count ? true : need_fence;

		if (need_mappable)
			list_move(&obj->exec_list, &ordered_objects);
		else
			list_move_tail(&obj->exec_list, &ordered_objects);

		obj->base.pending_read_domains = 0;
		obj->base.pending_write_domain = 0;
	}
	list_splice(&ordered_objects, objects);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(obj, objects, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
			bool need_fence, need_mappable;
			if (!obj->gtt_space)
				continue;

			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;
			need_mappable =
				entry->relocation_count ? true : need_fence;

			if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
			    (need_mappable && !obj->map_and_fenceable))
				ret = i915_gem_object_unbind(obj);
			else
				ret = i915_gem_object_pin(obj,
							  entry->alignment,
							  need_mappable);
			if (ret)
				goto err;

			entry++;
		}

		/* Bind fresh objects */
		list_for_each_entry(obj, objects, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
			bool need_fence;

			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;

			if (!obj->gtt_space) {
				bool need_mappable =
					entry->relocation_count ? true : need_fence;

				ret = i915_gem_object_pin(obj,
							  entry->alignment,
							  need_mappable);
				if (ret)
					break;
			}

			if (has_fenced_gpu_access) {
				if (need_fence) {
					ret = i915_gem_object_get_fence(obj, ring);
					if (ret)
						break;
				} else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
					   obj->tiling_mode == I915_TILING_NONE) {
					/* XXX pipelined! */
					ret = i915_gem_object_put_fence(obj);
					if (ret)
						break;
				}
				obj->pending_fenced_gpu_access = need_fence;
			}

			entry->offset = obj->gtt_offset;
		}

		/* Decrement pin count for bound objects */
		list_for_each_entry(obj, objects, exec_list) {
			if (obj->gtt_space)
				i915_gem_object_unpin(obj);
		}

		if (ret != -ENOSPC || retry > 1)
			return ret;

		/* First attempt, just clear anything that is purgeable.
		 * Second attempt, clear the entire GTT.
		 */
		ret = i915_gem_evict_everything(ring->dev, retry == 0);
		if (ret)
			return ret;

		retry++;
	} while (1);

err:
	obj = list_entry(obj->exec_list.prev,
			 struct drm_i915_gem_object,
			 exec_list);
	while (objects != &obj->exec_list) {
		if (obj->gtt_space)
			i915_gem_object_unpin(obj);

		obj = list_entry(obj->exec_list.prev,
				 struct drm_i915_gem_object,
				 exec_list);
	}

	return ret;
}

static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_file *file,
				  struct intel_ring_buffer *ring,
				  struct list_head *objects,
				  struct eb_objects *eb,
				  struct drm_i915_gem_exec_object2 *exec,
				  int count)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct drm_i915_gem_object *obj;
	int *reloc_offset;
	int i, total, ret;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(objects)) {
		obj = list_first_entry(objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;

		user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	for (i = 0; i < count; i++) {
		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
							exec[i].handle));
		if (&obj->base == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				   exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		list_add_tail(&obj->exec_list, objects);
		obj->exec_handle = exec[i].handle;
		obj->exec_entry = &exec[i];
		eb_add_object(eb, obj);
	}

	ret = i915_gem_execbuffer_reserve(ring, file, objects);
	if (ret)
		goto err;

	list_for_each_entry(obj, objects, exec_list) {
		int offset = obj->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
							       reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as they are: this is the painfully slow
	 * path, and we want to avoid the complication of dropping the lock
	 * whilst having buffers reserved in the aperture and so causing
	 * spurious ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

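/*
 * Emit the flushes accumulated for this execbuffer: a chipset flush for
 * CPU domain writes, a write barrier for GTT domain writes, and a ring
 * flush on every ring that touched a GPU domain.
 */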
static int
i915_gem_execbuffer_flush(struct drm_device *dev,
			  uint32_t invalidate_domains,
			  uint32_t flush_domains,
			  uint32_t flush_rings)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		intel_gtt_chipset_flush();

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
		for (i = 0; i < I915_NUM_RINGS; i++)
			if (flush_rings & (1 << i)) {
				ret = i915_gem_flush_ring(&dev_priv->ring[i],
							  invalidate_domains,
							  flush_domains);
				if (ret)
					return ret;
			}
	}

	return 0;
}

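/*
 * Synchronise an object's last rendering with the target ring. On
 * hardware with usable semaphores this queues a ring-to-ring wait;
 * otherwise we fall back to blocking on the CPU until the object's
 * previous rendering has completed.
 */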
static int
i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
			       struct intel_ring_buffer *to)
{
	struct intel_ring_buffer *from = obj->ring;
	u32 seqno;
	int ret, idx;

	if (from == NULL || to == from)
		return 0;

	/* XXX gpu semaphores are implicated in various hard hangs on SNB */
	if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
		return i915_gem_object_wait_rendering(obj);

	idx = intel_ring_sync_index(from, to);

	seqno = obj->last_rendering_seqno;
	if (seqno <= from->sync_seqno[idx])
		return 0;

	if (seqno == from->outstanding_lazy_request) {
		struct drm_i915_gem_request *request;

		request = kzalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return -ENOMEM;

		ret = i915_add_request(from, NULL, request);
		if (ret) {
			kfree(request);
			return ret;
		}

		seqno = request->seqno;
	}

	from->sync_seqno[idx] = seqno;
	return intel_ring_sync(to, from, seqno - 1);
}

static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
	u32 plane, flip_mask;
	int ret;

	/* Check for any pending flips. As we only maintain a flip queue depth
	 * of 1, we can simply insert a WAIT for the next display flip prior
	 * to executing the batch and avoid stalling the CPU.
	 */

	for (plane = 0; flips >> plane; plane++) {
		if (((flips >> plane) & 1) == 0)
			continue;

		if (plane)
			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
		else
			flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;

		ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);
	}

	return 0;
}

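/*
 * Transition all objects into the GPU domains they will be used in:
 * emit the combined flushes/invalidations, wait for any pending page
 * flips and synchronise with rendering outstanding on other rings.
 */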
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
				struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	struct change_domains cd;
	int ret;

	memset(&cd, 0, sizeof(cd));
	list_for_each_entry(obj, objects, exec_list)
		i915_gem_object_set_to_gpu_domain(obj, ring, &cd);

	if (cd.invalidate_domains | cd.flush_domains) {
		ret = i915_gem_execbuffer_flush(ring->dev,
						cd.invalidate_domains,
						cd.flush_domains,
						cd.flush_rings);
		if (ret)
			return ret;
	}

	if (cd.flips) {
		ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
		if (ret)
			return ret;
	}

	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_execbuffer_sync_rings(obj, ring);
		if (ret)
			return ret;
	}

	return 0;
}

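/* The batch start offset and length must both be 8-byte aligned. */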
static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;

	for (i = 0; i < count; i++) {
		char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
		int length; /* limited by fault_in_pages_readable() */

		/* First check for malicious input causing overflow */
		if (exec[i].relocation_count >
		    INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
			return -EINVAL;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		if (!access_ok(VERIFY_READ, ptr, length))
			return -EFAULT;

		/* we may also need to update the presumed offsets */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (fault_in_pages_readable(ptr, length))
			return -EFAULT;
	}

	return 0;
}

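/*
 * Update each object's domains from the pending values computed during
 * relocation, mark it active on the ring and, if it is now being written
 * by the GPU, put it on the ring's gpu_write_list.
 */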
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
				   struct intel_ring_buffer *ring,
				   u32 seqno)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, objects, exec_list) {
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.read_domains = obj->base.pending_read_domains;
		obj->base.write_domain = obj->base.pending_write_domain;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

		i915_gem_object_move_to_active(obj, ring, seqno);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->pending_gpu_write = true;
			list_move_tail(&obj->gpu_write_list,
				       &ring->gpu_write_list);
			intel_mark_busy(ring->dev, obj);
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_request *request;
	u32 invalidate;

	/*
	 * Ensure that the commands in the batch buffer are
	 * finished before the interrupt fires.
	 *
	 * The sampler always gets flushed on i965 (sigh).
	 */
	invalidate = I915_GEM_DOMAIN_COMMAND;
	if (INTEL_INFO(dev)->gen >= 4)
		invalidate |= I915_GEM_DOMAIN_SAMPLER;
	if (ring->flush(ring, invalidate, 0)) {
		i915_gem_next_request_seqno(ring);
		return;
	}

	/* Add a breadcrumb for the completion of the batch buffer */
	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (request == NULL || i915_add_request(ring, file, request)) {
		i915_gem_next_request_seqno(ring);
		kfree(request);
	}
}

static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head objects;
	struct eb_objects *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_clip_rect *cliprects = NULL;
	struct intel_ring_buffer *ring;
	u32 exec_start, exec_len;
	u32 seqno;
	int ret, mode, i;

	if (!i915_gem_check_execbuffer(args)) {
		DRM_ERROR("execbuf with invalid offset/length\n");
		return -EINVAL;
	}

	ret = validate_exec_list(exec, args->buffer_count);
	if (ret)
		return ret;

	switch (args->flags & I915_EXEC_RING_MASK) {
	case I915_EXEC_DEFAULT:
	case I915_EXEC_RENDER:
		ring = &dev_priv->ring[RCS];
		break;
	case I915_EXEC_BSD:
		if (!HAS_BSD(dev)) {
			DRM_ERROR("execbuf with invalid ring (BSD)\n");
			return -EINVAL;
		}
		ring = &dev_priv->ring[VCS];
		break;
	case I915_EXEC_BLT:
		if (!HAS_BLT(dev)) {
			DRM_ERROR("execbuf with invalid ring (BLT)\n");
			return -EINVAL;
		}
		ring = &dev_priv->ring[BCS];
		break;
	default:
		DRM_ERROR("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

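	/* Switch the relative-constants addressing mode via INSTPM on the
	 * render ring if userspace requested a mode different from the one
	 * currently programmed.
	 */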
	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	switch (mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (ring == &dev_priv->ring[RCS] &&
		    mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4)
				return -EINVAL;

			if (INTEL_INFO(dev)->gen > 5 &&
			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
				return -EINVAL;

			ret = intel_ring_begin(ring, 4);
			if (ret)
				return ret;

			intel_ring_emit(ring, MI_NOOP);
			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
			intel_ring_emit(ring, INSTPM);
			intel_ring_emit(ring,
					I915_EXEC_CONSTANTS_MASK << 16 | mode);
			intel_ring_advance(ring);

			dev_priv->relative_constants_mode = mode;
		}
		break;
	default:
		DRM_ERROR("execbuf with unknown constants: %d\n", mode);
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_ERROR("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}

		cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		if (copy_from_user(cliprects,
				     (struct drm_clip_rect __user *)(uintptr_t)
				     args->cliprects_ptr,
				     sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto pre_mutex_err;
		}
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->mm.suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	eb = eb_create(args->buffer_count);
	if (eb == NULL) {
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	INIT_LIST_HEAD(&objects);
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_i915_gem_object *obj;

		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
							exec[i].handle));
		if (&obj->base == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				   exec[i].handle, i);
			/* prevent error path from reading uninitialized data */
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->exec_list)) {
			DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n",
				   obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		list_add_tail(&obj->exec_list, &objects);
		obj->exec_handle = exec[i].handle;
		obj->exec_entry = &exec[i];
		eb_add_object(eb, obj);
	}

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = list_entry(objects.prev,
			       struct drm_i915_gem_object,
			       exec_list);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	ret = i915_gem_execbuffer_reserve(ring, file, &objects);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
								&objects, eb,
								exec,
								args->buffer_count);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_ERROR("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
	if (ret)
		goto err;

	seqno = i915_gem_next_request_seqno(ring);
	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
		if (seqno < ring->sync_seqno[i]) {
			/* The GPU can not handle its semaphore value wrapping,
			 * so every billion or so execbuffers, we need to stall
			 * the GPU in order to reset the counters.
			 */
			ret = i915_gpu_idle(dev);
			if (ret)
				goto err;

			BUG_ON(ring->sync_seqno[i]);
		}
	}

	trace_i915_gem_ring_dispatch(ring, seqno);

	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(dev, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto err;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len);
			if (ret)
				goto err;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
		if (ret)
			goto err;
	}

	i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
	i915_gem_execbuffer_retire_commands(dev, file, ring);

err:
	eb_destroy(eb);
	while (!list_empty(&objects)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	kfree(cliprects);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_ERROR("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_ERROR("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}