/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>

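/*
 * Per-execbuf handle->object lookup state. eb_create() first tries a flat
 * array (used directly when userspace passes indices via
 * I915_EXEC_HANDLE_LUT; eb->and then holds the negated table size) and
 * falls back to hash buckets keyed on the low bits of the handle.
 */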
struct eb_objects {
	struct list_head objects;
	int and;
	union {
		struct drm_i915_gem_object *lut[0];
		struct hlist_head buckets[0];
	};
};

static struct eb_objects *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_objects *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		int size = args->buffer_count;
		size *= sizeof(struct drm_i915_gem_object *);
		size += sizeof(struct eb_objects);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		int size = args->buffer_count;
		int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_objects),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->objects);
	return eb;
}

static void
eb_reset(struct eb_objects *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

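/*
 * Resolve each handle in the exec list to a GEM object under the file's
 * table lock, taking a reference and recording it in the lookup table.
 */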
static int
eb_lookup_objects(struct eb_objects *eb,
		  struct drm_i915_gem_exec_object2 *exec,
		  const struct drm_i915_gem_execbuffer2 *args,
		  struct drm_file *file)
{
	int i;

	spin_lock(&file->table_lock);
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_i915_gem_object *obj;

		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				   exec[i].handle, i);
			return -ENOENT;
		}

		if (!list_empty(&obj->exec_list)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				   obj, exec[i].handle, i);
			return -EINVAL;
		}

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->exec_list, &eb->objects);

		obj->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = obj;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			obj->exec_handle = handle;
			hlist_add_head(&obj->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
	}
	spin_unlock(&file->table_lock);

	return 0;
}

static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct hlist_node *node;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each(node, head) {
			struct drm_i915_gem_object *obj;

			obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
			if (obj->exec_handle == handle)
				return obj;
		}
		return NULL;
	}
}

static void
eb_destroy(struct eb_objects *eb)
{
	while (!list_empty(&eb->objects)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&eb->objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}
	kfree(eb);
}

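/*
 * Relocations are written through the CPU when a GTT write would be
 * impossible or incoherent: the object is already being written by the
 * CPU, cannot be mapped through the mappable aperture, or is cacheable.
 */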
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		!obj->map_and_fenceable ||
		obj->cache_level != I915_CACHE_NONE);
}

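/*
 * Apply a single relocation: look up the target object, validate the
 * requested domains and the relocation offset, then patch the dword either
 * through a CPU kmap or through the GTT aperture, updating the presumed
 * offset.
 */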
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_objects *eb,
				   struct drm_i915_gem_relocation_entry *reloc,
				   struct i915_address_space *vm)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	uint32_t target_offset;
	int ret = -EINVAL;

	/* we already hold a reference to all valid objects */
	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
	if (unlikely(target_obj == NULL))
		return -ENOENT;

	target_i915_obj = to_intel_bo(target_obj);
	target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !target_i915_obj->has_global_gtt_mapping)) {
		i915_gem_gtt_bind_object(target_i915_obj,
					 target_i915_obj->cache_level);
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset > obj->base.size - 4)) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return ret;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return ret;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && in_atomic())
		return -EFAULT;

	reloc->delta += target_offset;
	if (use_cpu_reloc(obj)) {
		uint32_t page_offset = offset_in_page(reloc->offset);
		char *vaddr;

		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
		if (ret)
			return ret;

		vaddr = kmap_atomic(i915_gem_object_get_page(obj,
							     reloc->offset >> PAGE_SHIFT));
		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
		kunmap_atomic(vaddr);
	} else {
		struct drm_i915_private *dev_priv = dev->dev_private;
		uint32_t __iomem *reloc_entry;
		void __iomem *reloc_page;

		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			return ret;

		ret = i915_gem_object_put_fence(obj);
		if (ret)
			return ret;

		/* Map the page containing the relocation we're going to perform. */
		reloc->offset += i915_gem_obj_ggtt_offset(obj);
		reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						      reloc->offset & PAGE_MASK);
		reloc_entry = (uint32_t __iomem *)
			(reloc_page + offset_in_page(reloc->offset));
		iowrite32(reloc->delta, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);
	}

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}
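
/*
 * Apply the relocations for one object on the fast path, copying the
 * user's relocation entries onto the stack in chunks so the writes can be
 * performed with pagefaults disabled; any fault drops us to the slow path.
 */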

static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
				    struct eb_objects *eb,
				    struct i915_address_space *vm)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int remain, ret;

	user_relocs = to_user_ptr(entry->relocs_ptr);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
								 vm);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}
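
/*
 * Slow-path variant: the relocations have already been copied into a
 * kernel array, so this can run with pagefaults enabled.
 */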

static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
					 struct eb_objects *eb,
					 struct drm_i915_gem_relocation_entry *relocs,
					 struct i915_address_space *vm)
{
	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
							 vm);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct eb_objects *eb,
			     struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. In such a case the page fault
	 * handler would call i915_gem_fault() and we would try to acquire
	 * the struct mutex again. Obviously this is bad and so lockdep
	 * complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(obj, &eb->objects, exec_list) {
		ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

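/*
 * Transient bookkeeping flags stored in exec_entry->flags while an object
 * is reserved, so that unreserve undoes exactly what reserve did.
 */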
#define  __EXEC_OBJECT_HAS_PIN (1<<31)
#define  __EXEC_OBJECT_HAS_FENCE (1<<30)

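/*
 * An object must be bound into the mappable aperture when its relocations
 * have to be performed through the GTT rather than by the CPU.
 */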
static int
need_reloc_mappable(struct drm_i915_gem_object *obj)
{
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	return entry->relocation_count && !use_cpu_reloc(obj);
}

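/*
 * Pin one object into the address space, acquiring a fence register and
 * aliasing-ppgtt/GTT bindings as required, and flag whether its offset
 * changed (in which case its relocations must be rewritten).
 */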
static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
				   struct intel_ring_buffer *ring,
				   struct i915_address_space *vm,
				   bool *need_reloc)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	bool need_fence, need_mappable;
	int ret;

	need_fence =
		has_fenced_gpu_access &&
		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
		obj->tiling_mode != I915_TILING_NONE;
	need_mappable = need_fence || need_reloc_mappable(obj);

	ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
				  false);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (has_fenced_gpu_access) {
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			ret = i915_gem_object_get_fence(obj);
			if (ret)
				return ret;

			if (i915_gem_object_pin_fence(obj))
				entry->flags |= __EXEC_OBJECT_HAS_FENCE;

			obj->pending_fenced_gpu_access = true;
		}
	}

	/* Ensure ppgtt mapping exists if needed */
	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
				       obj, obj->cache_level);

		obj->has_aliasing_ppgtt_mapping = 1;
	}

	if (entry->offset != i915_gem_obj_offset(obj, vm)) {
		entry->offset = i915_gem_obj_offset(obj, vm);
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
	    !obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	return 0;
}

static void
i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
{
	struct drm_i915_gem_exec_object2 *entry;

	if (!i915_gem_obj_ggtt_bound(obj))
		return;

	entry = obj->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		i915_gem_object_unpin(obj);

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

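/*
 * Reserve GTT space for every object in the execbuf, binding objects that
 * need the mappable aperture first and retrying after evicting everything
 * if the aperture is too fragmented.
 */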
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			    struct list_head *objects,
			    struct i915_address_space *vm,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct list_head ordered_objects;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	INIT_LIST_HEAD(&ordered_objects);
	while (!list_empty(objects)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		obj = list_first_entry(objects,
				       struct drm_i915_gem_object,
				       exec_list);
		entry = obj->exec_entry;

		need_fence =
			has_fenced_gpu_access &&
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(obj);

		if (need_mappable)
			list_move(&obj->exec_list, &ordered_objects);
		else
			list_move_tail(&obj->exec_list, &ordered_objects);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
		obj->pending_fenced_gpu_access = false;
	}
	list_splice(&ordered_objects, objects);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(obj, objects, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
			bool need_fence, need_mappable;
			u32 obj_offset;

			if (!i915_gem_obj_bound(obj, vm))
				continue;

			obj_offset = i915_gem_obj_offset(obj, vm);
			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;
			need_mappable = need_fence || need_reloc_mappable(obj);

			WARN_ON((need_mappable || need_fence) &&
				!i915_is_ggtt(vm));

			if ((entry->alignment &&
			     obj_offset & (entry->alignment - 1)) ||
			    (need_mappable && !obj->map_and_fenceable))
				ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
			else
				ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(obj, objects, exec_list) {
			if (i915_gem_obj_bound(obj, vm))
				continue;

			ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
			if (ret)
				goto err;
		}

err:		/* Decrement pin count for bound objects */
		list_for_each_entry(obj, objects, exec_list)
			i915_gem_execbuffer_unreserve_object(obj);

		if (ret != -ENOSPC || retry++)
			return ret;

		ret = i915_gem_evict_everything(ring->dev);
		if (ret)
			return ret;
	} while (1);
}
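
/*
 * Slow path for relocation faults: drop struct_mutex, copy every
 * relocation entry into a kernel buffer (invalidating the user's presumed
 * offsets on the way), then retake the lock, re-reserve the objects and
 * apply the relocations from the kernel copy.
 */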

static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_ring_buffer *ring,
				  struct eb_objects *eb,
				  struct drm_i915_gem_exec_object2 *exec,
				  struct i915_address_space *vm)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct drm_i915_gem_object *obj;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	int count = args->buffer_count;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->objects)) {
		obj = list_first_entry(&eb->objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (copy_to_user(&user_relocs[j].presumed_offset,
					 &invalid_offset,
					 sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_objects(eb, exec, args, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(obj, &eb->objects, exec_list) {
		int offset = obj->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
							       reloc + reloc_offset[offset],
							       vm);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}
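
/*
 * Before dispatch: wait for rendering on other rings, flush any CPU and
 * GTT writes and invalidate the GPU caches so the ring sees coherent data.
 */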

static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
				struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	uint32_t flush_domains = 0;
	int ret;

	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			i915_gem_clflush_object(obj);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		i915_gem_chipset_flush(ring->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(ring);
}

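/*
 * Reject unknown flags and require the batch start offset and length to be
 * 8-byte aligned.
 */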
static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}
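
/*
 * Sanity-check the exec list before taking any locks: reject unknown
 * per-object flags, guard against relocation-count overflow, and verify
 * that each relocation array is writable (presumed offsets are written
 * back to userspace).
 */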

static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;
	int relocs_total = 0;
	int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);

	for (i = 0; i < count; i++) {
		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
			return -EINVAL;

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915_prefault_disable)) {
			if (fault_in_multipages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}

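/*
 * After dispatch: commit the pending domain changes and mark every object
 * active on the ring, recording write seqnos for dirty objects.
 */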
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
				   struct i915_address_space *vm,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, objects, exec_list) {
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain == 0)
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

		i915_gem_object_move_to_active(obj, ring);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->last_write_seqno = intel_ring_get_seqno(ring);
			if (obj->pin_count) /* check for potential scanout */
				intel_mark_fb_busy(obj, ring);
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

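/*
 * Emit a request behind the batch so its completion can be tracked and the
 * objects eventually retired.
 */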
static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_ring_buffer *ring,
				    struct drm_i915_gem_object *obj)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)__i915_add_request(ring, file, obj, NULL);
}

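/*
 * I915_EXEC_GEN7_SOL_RESET: zero the four GEN7_SO_WRITE_OFFSET registers
 * with MI_LOAD_REGISTER_IMM before the batch starts.
 */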
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
		return 0;

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

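/*
 * Core of the execbuffer2 ioctl: validate the arguments, select the ring,
 * reserve and relocate all objects, flush caches, dispatch the batch and
 * queue the retirement request.
 */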
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec,
		       struct i915_address_space *vm)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct eb_objects *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_clip_rect *cliprects = NULL;
	struct intel_ring_buffer *ring;
	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 exec_start, exec_len;
	u32 mask, flags;
	int ret, mode, i;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(exec, args->buffer_count);
	if (ret)
		return ret;

	flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!file->is_master || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		flags |= I915_DISPATCH_PINNED;

	switch (args->flags & I915_EXEC_RING_MASK) {
	case I915_EXEC_DEFAULT:
	case I915_EXEC_RENDER:
		ring = &dev_priv->ring[RCS];
		break;
	case I915_EXEC_BSD:
		ring = &dev_priv->ring[VCS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	case I915_EXEC_BLT:
		ring = &dev_priv->ring[BCS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	case I915_EXEC_VEBOX:
		ring = &dev_priv->ring[VECS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;

	default:
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}
	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	mask = I915_EXEC_CONSTANTS_MASK;
	switch (mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (ring == &dev_priv->ring[RCS] &&
		    mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4)
				return -EINVAL;

			if (INTEL_INFO(dev)->gen > 5 &&
			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
				return -EINVAL;

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}

		if (INTEL_INFO(dev)->gen >= 5) {
			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
			return -EINVAL;
		}

		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			return -EINVAL;
		}

		cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		if (copy_from_user(cliprects,
				   to_user_ptr(args->cliprects_ptr),
				   sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto pre_mutex_err;
		}
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->ums.mm_suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	eb = eb_create(args);
	if (eb == NULL) {
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_objects(eb, exec, args, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = list_entry(eb->objects.prev,
			       struct drm_i915_gem_object,
			       exec_list);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb, vm);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
								eb, exec, vm);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but let's be paranoid and do it
	 * unconditionally for now. */
	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);

	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
	if (ret)
		goto err;

	ret = i915_switch_context(ring, file, ctx_id);
	if (ret)
		goto err;

	if (ring == &dev_priv->ring[RCS] &&
	    mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto err;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, mask << 16 | mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto err;
	}

	exec_start = i915_gem_obj_offset(batch_obj, vm) +
		args->batch_start_offset;
	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(dev, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto err;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len,
							flags);
			if (ret)
				goto err;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len,
						flags);
		if (ret)
			goto err;
	}

	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

	i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

err:
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	kfree(cliprects);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
				     &dev_priv->gtt.base);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}
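
/*
 * execbuffer2 ioctl: copy in the exec list, run it through
 * i915_gem_do_execbuffer() against the global GTT, and copy the updated
 * offsets back to userspace.
 */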

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
				     &dev_priv->gtt.base);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}