/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

struct change_domains {
	uint32_t invalidate_domains;
	uint32_t flush_domains;
	uint32_t flush_rings;
};

/*
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invalidating though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Mapped to GTT
 *	4. Read by GPU
 *	5. Unmapped from GTT
 *	6. Freed
 *
 *	Let's take these a step at a time
 *
 *	1. Allocated
 *		Pages allocated from the kernel may still have
 *		cache contents, so we set them to (CPU, CPU) always.
 *	2. Written by CPU (using pwrite)
 *		The pwrite function calls set_domain (CPU, CPU) and
 *		this function does nothing (as nothing changes)
 *	3. Mapped by GTT
 *		This function asserts that the object is not
 *		currently in any GPU-based read or write domains
 *	4. Read by GPU
 *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *		As write_domain is zero, this function adds in the
 *		current read domains (CPU+COMMAND, 0).
 *		flush_domains is set to CPU.
 *		invalidate_domains is set to COMMAND
 *		clflush is run to get data out of the CPU caches
 *		then i915_dev_set_domain calls i915_gem_flush to
 *		emit an MI_FLUSH and drm_agp_chipset_flush
 *	5. Unmapped from GTT
 *		i915_gem_object_unbind calls set_domain (CPU, CPU)
 *		flush_domains and invalidate_domains end up both zero
 *		so no flushing/invalidating happens
 *	6. Freed
 *		yay, done
 *
 * Case 2: The shared render buffer
 *
 *	1. Allocated
 *	2. Mapped to GTT
 *	3. Read/written by GPU
 *	4. set_domain to (CPU,CPU)
 *	5. Read/written by CPU
 *	6. Read/written by GPU
 *
 *	1. Allocated
 *		Same as last example, (CPU, CPU)
 *	2. Mapped to GTT
 *		Nothing changes (assertions find that it is not in the GPU)
 *	3. Read/written by GPU
 *		execbuffer calls set_domain (RENDER, RENDER)
 *		flush_domains gets CPU
 *		invalidate_domains gets GPU
 *		clflush (obj)
 *		MI_FLUSH and drm_agp_chipset_flush
 *	4. set_domain (CPU, CPU)
 *		flush_domains gets GPU
 *		invalidate_domains gets CPU
 *		wait_rendering (obj) to make sure all drawing is complete.
 *		This will include an MI_FLUSH to get the data from GPU
 *		to memory
 *		clflush (obj) to invalidate the CPU cache
 *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *	5. Read/written by CPU
 *		cache lines are loaded and dirtied
 *	6. Read/written by GPU
 *		Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Read by GPU
 *	4. Updated (written) by CPU again
 *	5. Read by GPU
 *
 *	1. Allocated
 *		(CPU, CPU)
 *	2. Written by CPU
 *		(CPU, CPU)
 *	3. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 *	4. Updated (written) by CPU again
 *		(CPU, CPU)
 *		flush_domains = 0 (no previous write domain)
 *		invalidate_domains = 0 (no new read domains)
 *	5. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 */
static void
i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
				  struct intel_ring_buffer *ring,
				  struct change_domains *cd)
{
	uint32_t invalidate_domains = 0, flush_domains = 0;

	/*
	 * If the object isn't moving to a new write domain,
	 * let the object stay in multiple read domains
	 */
	if (obj->base.pending_write_domain == 0)
		obj->base.pending_read_domains |= obj->base.read_domains;

	/*
	 * Flush the current write domain if
	 * the new read domains don't match. Invalidate
	 * any read domains which differ from the old
	 * write domain
	 */
	if (obj->base.write_domain &&
	    (((obj->base.write_domain != obj->base.pending_read_domains ||
	       obj->ring != ring)) ||
	     (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
		flush_domains |= obj->base.write_domain;
		invalidate_domains |=
			obj->base.pending_read_domains & ~obj->base.write_domain;
	}
	/*
	 * Invalidate any read caches which may have
	 * stale data. That is, any new read domains.
	 */
	invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
		i915_gem_clflush_object(obj);

	/* blow away mappings if mapped through GTT */
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
		i915_gem_release_mmap(obj);

	/* The actual obj->write_domain will be updated with
	 * pending_write_domain after we emit the accumulated flush for all
	 * of our domain changes in execbuffers (which clears objects'
	 * write_domains).  So if we have a current write domain that we
	 * aren't changing, set pending_write_domain to that.
	 */
	if (flush_domains == 0 && obj->base.pending_write_domain == 0)
		obj->base.pending_write_domain = obj->base.write_domain;

	cd->invalidate_domains |= invalidate_domains;
	cd->flush_domains |= flush_domains;
	if (flush_domains & I915_GEM_GPU_DOMAINS)
		cd->flush_rings |= obj->ring->id;
	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
		cd->flush_rings |= ring->id;
}

struct eb_objects {
	int and;
	struct hlist_head buckets[0];
};

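/*
 * Per-execbuffer hash table mapping handles to objects for relocation
 * lookups.  The bucket array starts at half a page of hlist_heads and is
 * halved until it is no larger than the number of objects.
 */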
static struct eb_objects *
eb_create(int size)
{
	struct eb_objects *eb;
	int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
	while (count > size)
		count >>= 1;
	eb = kzalloc(count*sizeof(struct hlist_head) +
		     sizeof(struct eb_objects),
		     GFP_KERNEL);
	if (eb == NULL)
		return eb;

	eb->and = count - 1;
	return eb;
}

static void
eb_reset(struct eb_objects *eb)
{
	memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static void
eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
{
	hlist_add_head(&obj->exec_node,
		       &eb->buckets[obj->exec_handle & eb->and]);
}

static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct drm_i915_gem_object *obj;

	head = &eb->buckets[handle & eb->and];
	hlist_for_each(node, head) {
		obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
		if (obj->exec_handle == handle)
			return obj;
	}

	return NULL;
}

static void
eb_destroy(struct eb_objects *eb)
{
	kfree(eb);
}

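/*
 * Apply a single relocation: look up the target object, validate the
 * requested domains and offsets, then write the target's GTT offset plus
 * the delta into the object (via kmap for CPU-domain objects, otherwise
 * through an atomic GTT io-mapping) and record the new presumed offset.
 */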
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_objects *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	uint32_t target_offset;
	int ret = -EINVAL;

	/* we already hold a reference to all valid objects */
	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
	if (unlikely(target_obj == NULL))
		return -ENOENT;

	target_offset = to_intel_bo(target_obj)->gtt_offset;

	/* The target buffer should have appeared before us in the
	 * exec_object list, so it should have a GTT space bound by now.
	 */
	if (unlikely(target_offset == 0)) {
		DRM_ERROR("No GTT space found for object %d\n",
			  reloc->target_handle);
		return ret;
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_ERROR("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
		DRM_ERROR("reloc with read/write CPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
		     reloc->write_domain != target_obj->pending_write_domain)) {
		DRM_ERROR("Write domain conflict: "
			  "obj %p target %d offset %d "
			  "new %08x old %08x\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->write_domain,
			  target_obj->pending_write_domain);
		return ret;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset > obj->base.size - 4)) {
		DRM_ERROR("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return ret;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_ERROR("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return ret;
	}

	/* and points to somewhere within the target object. */
	if (unlikely(reloc->delta >= target_obj->size)) {
		DRM_ERROR("Relocation beyond target object bounds: "
			  "obj %p target %d delta %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->delta,
			  (int) target_obj->size);
		return ret;
	}

	reloc->delta += target_offset;
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
		uint32_t page_offset = reloc->offset & ~PAGE_MASK;
		char *vaddr;

		vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
		kunmap_atomic(vaddr);
	} else {
		struct drm_i915_private *dev_priv = dev->dev_private;
		uint32_t __iomem *reloc_entry;
		void __iomem *reloc_page;

		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
		if (ret)
			return ret;

		/* Map the page containing the relocation we're going to perform.  */
		reloc->offset += obj->gtt_offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      reloc->offset & PAGE_MASK);
		reloc_entry = (uint32_t __iomem *)
			(reloc_page + (reloc->offset & ~PAGE_MASK));
		iowrite32(reloc->delta, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);
	}

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

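/*
 * Process all relocations for one object using the non-faulting atomic
 * copies; the user pages were prefaulted in validate_exec_list(), so an
 * -EFAULT here sends the caller down the slow path.
 */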
static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
				    struct eb_objects *eb)
{
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int i, ret;

	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
	for (i = 0; i < entry->relocation_count; i++) {
		struct drm_i915_gem_relocation_entry reloc;

		if (__copy_from_user_inatomic(&reloc,
					      user_relocs+i,
					      sizeof(reloc)))
			return -EFAULT;

		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
		if (ret)
			return ret;

		if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
					    &reloc.presumed_offset,
					    sizeof(reloc.presumed_offset)))
			return -EFAULT;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
					 struct eb_objects *eb,
					 struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct drm_device *dev,
			     struct eb_objects *eb,
			     struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	int ret;

	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_execbuffer_relocate_object(obj, eb);
		if (ret)
			return ret;
	}

	return 0;
}

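/*
 * Reserve GTT space for every object on the execbuffer list.  Objects that
 * need to be mappable or fenced are moved to the front of the list so that
 * they get first pick of the aperture; the pinning strategy itself is
 * described in the comment below.
 */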
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			    struct drm_file *file,
			    struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	int ret, retry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	struct list_head ordered_objects;

	INIT_LIST_HEAD(&ordered_objects);
	while (!list_empty(objects)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		obj = list_first_entry(objects,
				       struct drm_i915_gem_object,
				       exec_list);
		entry = obj->exec_entry;

		need_fence =
			has_fenced_gpu_access &&
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable =
			entry->relocation_count ? true : need_fence;

		if (need_mappable)
			list_move(&obj->exec_list, &ordered_objects);
		else
			list_move_tail(&obj->exec_list, &ordered_objects);

		obj->base.pending_read_domains = 0;
		obj->base.pending_write_domain = 0;
	}
	list_splice(&ordered_objects, objects);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(obj, objects, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
			bool need_fence, need_mappable;
			if (!obj->gtt_space)
				continue;

			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;
			need_mappable =
				entry->relocation_count ? true : need_fence;

			if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
			    (need_mappable && !obj->map_and_fenceable))
				ret = i915_gem_object_unbind(obj);
			else
				ret = i915_gem_object_pin(obj,
							  entry->alignment,
							  need_mappable);
			if (ret)
				goto err;

			entry++;
		}

		/* Bind fresh objects */
		list_for_each_entry(obj, objects, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
			bool need_fence;

			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;

			if (!obj->gtt_space) {
				bool need_mappable =
					entry->relocation_count ? true : need_fence;

				ret = i915_gem_object_pin(obj,
							  entry->alignment,
							  need_mappable);
				if (ret)
					break;
			}

			if (has_fenced_gpu_access) {
				if (need_fence) {
					ret = i915_gem_object_get_fence(obj, ring, 1);
					if (ret)
						break;
				} else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
					   obj->tiling_mode == I915_TILING_NONE) {
					/* XXX pipelined! */
					ret = i915_gem_object_put_fence(obj);
					if (ret)
						break;
				}
				obj->pending_fenced_gpu_access = need_fence;
			}

			entry->offset = obj->gtt_offset;
		}

		/* Decrement pin count for bound objects */
		list_for_each_entry(obj, objects, exec_list) {
			if (obj->gtt_space)
				i915_gem_object_unpin(obj);
		}

		if (ret != -ENOSPC || retry > 1)
			return ret;

		/* First attempt, just clear anything that is purgeable.
		 * Second attempt, clear the entire GTT.
		 */
		ret = i915_gem_evict_everything(ring->dev, retry == 0);
		if (ret)
			return ret;

		retry++;
	} while (1);

err:
	obj = list_entry(obj->exec_list.prev,
			 struct drm_i915_gem_object,
			 exec_list);
	while (objects != &obj->exec_list) {
		if (obj->gtt_space)
			i915_gem_object_unpin(obj);

		obj = list_entry(obj->exec_list.prev,
				 struct drm_i915_gem_object,
				 exec_list);
	}

	return ret;
}

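/*
 * Slow path for relocations that faulted: drop struct_mutex, copy all
 * relocation entries with the faulting copy_from_user(), then retake the
 * lock, look the objects up again, re-reserve them and apply the
 * relocations from the kernel copy.
 */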
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_file *file,
				  struct intel_ring_buffer *ring,
				  struct list_head *objects,
				  struct eb_objects *eb,
				  struct drm_i915_gem_exec_object2 *exec,
				  int count)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct drm_i915_gem_object *obj;
	int *reloc_offset;
	int i, total, ret;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(objects)) {
		obj = list_first_entry(objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;

		user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	for (i = 0; i < count; i++) {
		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
							exec[i].handle));
		if (obj == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				   exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		list_add_tail(&obj->exec_list, objects);
		obj->exec_handle = exec[i].handle;
		obj->exec_entry = &exec[i];
		eb_add_object(eb, obj);
	}

	ret = i915_gem_execbuffer_reserve(ring, file, objects);
	if (ret)
		goto err;

	list_for_each_entry(obj, objects, exec_list) {
		int offset = obj->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
							       reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as they are; this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

static int
i915_gem_execbuffer_flush(struct drm_device *dev,
			  uint32_t invalidate_domains,
			  uint32_t flush_domains,
			  uint32_t flush_rings)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		intel_gtt_chipset_flush();

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
		for (i = 0; i < I915_NUM_RINGS; i++)
			if (flush_rings & (1 << i)) {
				ret = i915_gem_flush_ring(&dev_priv->ring[i],
							  invalidate_domains,
							  flush_domains);
				if (ret)
					return ret;
			}
	}

	return 0;
}

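/*
 * Synchronise against an object still in use by another ring: either wait
 * for its rendering to complete, or (on gen6+ non-mobile parts) record a
 * semaphore value so the target ring waits on the GPU instead.
 */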
static int
i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
			       struct intel_ring_buffer *to)
{
	struct intel_ring_buffer *from = obj->ring;
	u32 seqno;
	int ret, idx;

	if (from == NULL || to == from)
		return 0;

	/* XXX gpu semaphores are currently causing hard hangs on SNB mobile */
	if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev))
		return i915_gem_object_wait_rendering(obj, true);

	idx = intel_ring_sync_index(from, to);

	seqno = obj->last_rendering_seqno;
	if (seqno <= from->sync_seqno[idx])
		return 0;

	if (seqno == from->outstanding_lazy_request) {
		struct drm_i915_gem_request *request;

		request = kzalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return -ENOMEM;

		ret = i915_add_request(from, NULL, request);
		if (ret) {
			kfree(request);
			return ret;
		}

		seqno = request->seqno;
	}

	from->sync_seqno[idx] = seqno;
	return intel_ring_sync(to, from, seqno - 1);
}

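/*
 * Accumulate the domain transitions for every object in the execbuffer,
 * emit the combined flushes and invalidations in one go, then synchronise
 * with any rings still using the objects.
 */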
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
				struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	struct change_domains cd;
	int ret;

	cd.invalidate_domains = 0;
	cd.flush_domains = 0;
	cd.flush_rings = 0;
	list_for_each_entry(obj, objects, exec_list)
		i915_gem_object_set_to_gpu_domain(obj, ring, &cd);

	if (cd.invalidate_domains | cd.flush_domains) {
		ret = i915_gem_execbuffer_flush(ring->dev,
						cd.invalidate_domains,
						cd.flush_domains,
						cd.flush_rings);
		if (ret)
			return ret;
	}

	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_execbuffer_sync_rings(obj, ring);
		if (ret)
			return ret;
	}

	return 0;
}

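/* The batch start offset and length must both be 8-byte aligned. */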
static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;

	for (i = 0; i < count; i++) {
		char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
		int length; /* limited by fault_in_pages_readable() */

		/* First check for malicious input causing overflow */
		if (exec[i].relocation_count >
		    INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
			return -EINVAL;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		if (!access_ok(VERIFY_READ, ptr, length))
			return -EFAULT;

		/* we may also need to update the presumed offsets */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (fault_in_pages_readable(ptr, length))
			return -EFAULT;
	}

	return 0;
}

static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring,
				   struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	int flips;

	/* Check for any pending flips. As we only maintain a flip queue depth
	 * of 1, we can simply insert a WAIT for the next display flip prior
	 * to executing the batch and avoid stalling the CPU.
	 */
	flips = 0;
	list_for_each_entry(obj, objects, exec_list) {
		if (obj->base.write_domain)
			flips |= atomic_read(&obj->pending_flip);
	}
	if (flips) {
		int plane, flip_mask, ret;

		for (plane = 0; flips >> plane; plane++) {
			if (((flips >> plane) & 1) == 0)
				continue;

			if (plane)
				flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
			else
				flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;

			ret = intel_ring_begin(ring, 2);
			if (ret)
				return ret;

			intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
			intel_ring_emit(ring, MI_NOOP);
			intel_ring_advance(ring);
		}
	}

	return 0;
}

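/*
 * Commit the pending read/write domains and mark each object as active on
 * the ring; objects left with a GPU write domain are flagged dirty and
 * queued on the ring's gpu_write_list.
 */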
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
				   struct intel_ring_buffer *ring,
				   u32 seqno)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, objects, exec_list) {
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.read_domains = obj->base.pending_read_domains;
		obj->base.write_domain = obj->base.pending_write_domain;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

		i915_gem_object_move_to_active(obj, ring, seqno);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->pending_gpu_write = true;
			list_move_tail(&obj->gpu_write_list,
				       &ring->gpu_write_list);
			intel_mark_busy(ring->dev, obj);
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

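/*
 * Emit the post-batch flush and add a request (breadcrumb) so completion
 * of the batch buffer can be tracked.
 */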
static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_request *request;
	u32 invalidate;

	/*
	 * Ensure that the commands in the batch buffer are
	 * finished before the interrupt fires.
	 *
	 * The sampler always gets flushed on i965 (sigh).
	 */
	invalidate = I915_GEM_DOMAIN_COMMAND;
	if (INTEL_INFO(dev)->gen >= 4)
		invalidate |= I915_GEM_DOMAIN_SAMPLER;
	if (ring->flush(ring, invalidate, 0)) {
		i915_gem_next_request_seqno(ring);
		return;
	}

	/* Add a breadcrumb for the completion of the batch buffer */
	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (request == NULL || i915_add_request(ring, file, request)) {
		i915_gem_next_request_seqno(ring);
		kfree(request);
	}
}

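/*
 * Main execbuffer path: validate the arguments, select the target ring,
 * look up and reserve every object, apply relocations, flush and
 * invalidate the required domains, then dispatch the batch (once per
 * cliprect, if any) and queue its retirement.
 */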
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head objects;
	struct eb_objects *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_clip_rect *cliprects = NULL;
	struct intel_ring_buffer *ring;
	u32 exec_start, exec_len;
	u32 seqno;
	int ret, mode, i;

	if (!i915_gem_check_execbuffer(args)) {
		DRM_ERROR("execbuf with invalid offset/length\n");
		return -EINVAL;
	}

	ret = validate_exec_list(exec, args->buffer_count);
	if (ret)
		return ret;

	switch (args->flags & I915_EXEC_RING_MASK) {
	case I915_EXEC_DEFAULT:
	case I915_EXEC_RENDER:
		ring = &dev_priv->ring[RCS];
		break;
	case I915_EXEC_BSD:
		if (!HAS_BSD(dev)) {
			DRM_ERROR("execbuf with invalid ring (BSD)\n");
			return -EINVAL;
		}
		ring = &dev_priv->ring[VCS];
		break;
	case I915_EXEC_BLT:
		if (!HAS_BLT(dev)) {
			DRM_ERROR("execbuf with invalid ring (BLT)\n");
			return -EINVAL;
		}
		ring = &dev_priv->ring[BCS];
		break;
	default:
		DRM_ERROR("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	switch (mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (ring == &dev_priv->ring[RCS] &&
		    mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4)
				return -EINVAL;

			if (INTEL_INFO(dev)->gen > 5 &&
			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
				return -EINVAL;

			ret = intel_ring_begin(ring, 4);
			if (ret)
				return ret;

			intel_ring_emit(ring, MI_NOOP);
			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
			intel_ring_emit(ring, INSTPM);
			intel_ring_emit(ring,
					I915_EXEC_CONSTANTS_MASK << 16 | mode);
			intel_ring_advance(ring);

			dev_priv->relative_constants_mode = mode;
		}
		break;
	default:
		DRM_ERROR("execbuf with unknown constants: %d\n", mode);
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_ERROR("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}

		cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		if (copy_from_user(cliprects,
				     (struct drm_clip_rect __user *)(uintptr_t)
				     args->cliprects_ptr,
				     sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto pre_mutex_err;
		}
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->mm.suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	eb = eb_create(args->buffer_count);
	if (eb == NULL) {
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	INIT_LIST_HEAD(&objects);
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_i915_gem_object *obj;

		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
							exec[i].handle));
		if (obj == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				   exec[i].handle, i);
			/* prevent error path from reading uninitialized data */
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->exec_list)) {
			DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n",
				   obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		list_add_tail(&obj->exec_list, &objects);
		obj->exec_handle = exec[i].handle;
		obj->exec_entry = &exec[i];
		eb_add_object(eb, obj);
	}

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = list_entry(objects.prev,
			       struct drm_i915_gem_object,
			       exec_list);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	ret = i915_gem_execbuffer_reserve(ring, file, &objects);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
								&objects, eb,
								exec,
								args->buffer_count);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_ERROR("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
	if (ret)
		goto err;

	ret = i915_gem_execbuffer_wait_for_flips(ring, &objects);
	if (ret)
		goto err;

	seqno = i915_gem_next_request_seqno(ring);
	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
		if (seqno < ring->sync_seqno[i]) {
			/* The GPU can not handle its semaphore value wrapping,
			 * so every billion or so execbuffers, we need to stall
			 * the GPU in order to reset the counters.
			 */
			ret = i915_gpu_idle(dev);
			if (ret)
				goto err;

			BUG_ON(ring->sync_seqno[i]);
		}
	}

	trace_i915_gem_ring_dispatch(ring, seqno);

	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(dev, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto err;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len);
			if (ret)
				goto err;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
		if (ret)
			goto err;
	}

	i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
	i915_gem_execbuffer_retire_commands(dev, file, ring);

err:
	eb_destroy(eb);
	while (!list_empty(&objects)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	kfree(cliprects);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_ERROR("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec2_list == NULL) {
		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_ERROR("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}