i915_gem_execbuffer.c
/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>

#define  __EXEC_OBJECT_HAS_PIN (1<<31)
#define  __EXEC_OBJECT_HAS_FENCE (1<<30)

struct eb_vmas {
	struct list_head vmas;
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};

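/*
 * Allocate the per-execbuf lookup structure.  When userspace passes
 * I915_EXEC_HANDLE_LUT the handles are dense indices, so a flat
 * look-up table is allocated; otherwise we fall back to a small hash
 * table sized from the buffer count.
 */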
static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}

static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

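/*
 * Resolve each exec object handle to its GEM object under the file's
 * table_lock, taking a reference on every object, then (with the lock
 * dropped) look up or create the vma for the target address space and
 * add it to the eb list and handle-lookup structure.
 */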
static int
eb_lookup_vmas(struct eb_vmas *eb,
	       struct drm_i915_gem_exec_object2 *exec,
	       const struct drm_i915_gem_execbuffer2 *args,
	       struct i915_address_space *vm,
	       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret = 0;

	INIT_LIST_HEAD(&objects);
	spin_lock(&file->table_lock);
	/* Grab a reference to the object and release the lock so we can look up
	 * or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				   exec[i].handle, i);
			ret = -ENOENT;
			goto out;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				   obj, exec[i].handle, i);
			ret = -EINVAL;
			goto out;
		}

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&file->table_lock);

	i = 0;
	list_for_each_entry(obj, &objects, obj_exec_link) {
		struct i915_vma *vma;
		struct i915_address_space *bind_vm = vm;

		if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
		    USES_FULL_PPGTT(vm->dev)) {
			ret = -EINVAL;
			goto out;
		}

		/* If we have secure dispatch, or the userspace assures us that
		 * they know what they're doing, use the GGTT VM.
		 */
		if (((args->flags & I915_EXEC_SECURE) &&
		    (i == (args->buffer_count - 1))))
			bind_vm = &dev_priv->gtt.base;

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
		if (IS_ERR(vma)) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto out;
		}

		list_add_tail(&vma->exec_list, &eb->vmas);

		vma->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}


out:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		if (ret)
			drm_gem_object_unreference(&obj->base);
	}
	return ret;
}

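/*
 * Map an exec handle back to its vma: a direct index in LUT mode
 * (eb->and < 0), otherwise a walk of the hash bucket for that handle.
 */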
static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct hlist_node *node;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each(node, head) {
			struct i915_vma *vma;

			vma = hlist_entry(node, struct i915_vma, exec_node);
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

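/*
 * Undo the reservation made for a vma during execbuf: release the fence
 * (if one was pinned), drop the pin and clear the bookkeeping flags.
 */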
static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry;
	struct drm_i915_gem_object *obj = vma->obj;

	if (!drm_mm_node_allocated(&vma->node))
		return;

	entry = vma->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		vma->pin_count--;

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void eb_destroy(struct eb_vmas *eb)
{
	while (!list_empty(&eb->vmas)) {
		struct i915_vma *vma;

		vma = list_first_entry(&eb->vmas,
				       struct i915_vma,
				       exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}
	kfree(eb);
}

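/*
 * Decide whether a relocation should be written through a CPU mapping
 * rather than the GTT: always on LLC platforms, and whenever the object
 * is already in the CPU write domain, is not mappable and fenceable, or
 * is not uncached.
 */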
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (HAS_LLC(obj->base.dev) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		!obj->map_and_fenceable ||
		obj->cache_level != I915_CACHE_NONE);
}

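/*
 * Apply a single relocation by writing reloc->delta into the object,
 * either through a kmap'd CPU mapping (relocate_entry_cpu) or through
 * the mappable GTT aperture (relocate_entry_gtt below).  On gen8+ the
 * following dword is written as well, as relocated addresses are wider
 * than 32 bits there.
 */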
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	char *vaddr;
	int ret = -EINVAL;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
				reloc->offset >> PAGE_SHIFT));
	*(uint32_t *)(vaddr + page_offset) = reloc->delta;

	if (INTEL_INFO(dev)->gen >= 8) {
		page_offset = offset_in_page(page_offset + sizeof(uint32_t));

		if (page_offset == 0) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic(i915_gem_object_get_page(obj,
			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
		}

		*(uint32_t *)(vaddr + page_offset) = 0;
	}

	kunmap_atomic(vaddr);

	return 0;
}

static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t __iomem *reloc_entry;
	void __iomem *reloc_page;
	int ret = -EINVAL;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	/* Map the page containing the relocation we're going to perform.  */
	reloc->offset += i915_gem_obj_ggtt_offset(obj);
	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
			reloc->offset & PAGE_MASK);
	reloc_entry = (uint32_t __iomem *)
		(reloc_page + offset_in_page(reloc->offset));
	iowrite32(reloc->delta, reloc_entry);

	if (INTEL_INFO(dev)->gen >= 8) {
		reloc_entry += 1;

		if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
			io_mapping_unmap_atomic(reloc_page);
			reloc_page = io_mapping_map_atomic_wc(
					dev_priv->gtt.mappable,
					reloc->offset + sizeof(uint32_t));
			reloc_entry = reloc_page;
		}

		iowrite32(0, reloc_entry);
	}

	io_mapping_unmap_atomic(reloc_page);

	return 0;
}

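/*
 * Process one relocation entry: look up the target vma, validate the
 * requested GPU domains and the relocation offset, and, unless the
 * presumed offset already matches, patch the location in the object via
 * the CPU or GTT path and update the entry's presumed_offset.
 */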
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_vmas *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint32_t target_offset;
	int ret = -EINVAL;

	/* we already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = target_vma->node.start;

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !target_i915_obj->has_global_gtt_mapping)) {
		struct i915_vma *vma =
			list_first_entry(&target_i915_obj->vma_list,
					 typeof(*vma), vma_link);
		vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return ret;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return ret;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && in_atomic())
		return -EFAULT;

	reloc->delta += target_offset;
	if (use_cpu_reloc(obj))
		ret = relocate_entry_cpu(obj, reloc);
	else
		ret = relocate_entry_gtt(obj, reloc);

	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
				 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int remain, ret;

	user_relocs = to_user_ptr(entry->relocs_ptr);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
				      struct eb_vmas *eb,
				      struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
	struct i915_vma *vma;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within an mmaped bo. In such a case, the page
	 * fault handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(vma, &eb->vmas, exec_list) {
		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

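/*
 * Report whether this vma must be bound inside the mappable aperture,
 * i.e. it has relocations that will be performed through the GTT.
 */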
static int
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
		i915_is_ggtt(vma->vm);
}

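/*
 * Pin a single vma for the upcoming execbuf: reserve a fence register
 * when the object needs one, flag the entry for relocation if it did not
 * end up at its presumed offset, and bind the vma (adding a global GTT
 * binding when EXEC_OBJECT_NEEDS_GTT is set and none exists yet).
 */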
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
				struct intel_ring_buffer *ring,
				bool *need_reloc)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	bool need_fence, need_mappable;
	u32 flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
		!vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
	int ret;

	need_fence =
		has_fenced_gpu_access &&
		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
		obj->tiling_mode != I915_TILING_NONE;
	need_mappable = need_fence || need_reloc_mappable(vma);

	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
				  false);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (has_fenced_gpu_access) {
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			ret = i915_gem_object_get_fence(obj);
			if (ret)
				return ret;

			if (i915_gem_object_pin_fence(obj))
				entry->flags |= __EXEC_OBJECT_HAS_FENCE;

			obj->pending_fenced_gpu_access = true;
		}
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	vma->bind_vma(vma, obj->cache_level, flags);

	return 0;
}

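/*
 * Reserve GTT space for every vma in the execbuf.  Objects that need to
 * be mappable or fenced are moved to the front of the list, then each
 * vma is pinned in turn; on -ENOSPC everything is unreserved, the
 * address space is evicted and the whole pass is retried (see the
 * comment on the three phases below).
 */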
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			    struct list_head *vmas,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct i915_address_space *vm;
	struct list_head ordered_vmas;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	if (list_empty(vmas))
		return 0;

	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

	INIT_LIST_HEAD(&ordered_vmas);
	while (!list_empty(vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(vmas, struct i915_vma, exec_list);
		obj = vma->obj;
		entry = vma->exec_entry;

		need_fence =
			has_fenced_gpu_access &&
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (need_mappable)
			list_move(&vma->exec_list, &ordered_vmas);
		else
			list_move_tail(&vma->exec_list, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
		obj->pending_fenced_gpu_access = false;
	}
	list_splice(&ordered_vmas, vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
			bool need_fence, need_mappable;

			obj = vma->obj;

			if (!drm_mm_node_allocated(&vma->node))
				continue;

			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;
			need_mappable = need_fence || need_reloc_mappable(vma);

			WARN_ON((need_mappable || need_fence) &&
			       !i915_is_ggtt(vma->vm));

			if ((entry->alignment &&
			     vma->node.start & (entry->alignment - 1)) ||
			    (need_mappable && !obj->map_and_fenceable))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, vmas, exec_list) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

err:
		if (ret != -ENOSPC || retry++)
			return ret;

		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);
}

static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_ring_buffer *ring,
				  struct eb_vmas *eb,
				  struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	unsigned count = args->buffer_count;

	if (WARN_ON(list_empty(&eb->vmas)))
		return 0;

	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->vmas)) {
		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (copy_to_user(&user_relocs[j].presumed_offset,
					 &invalid_offset,
					 sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		int offset = vma->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
							    reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as they are; this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
				struct list_head *vmas)
{
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_chipset)
		i915_gem_chipset_flush(ring->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(ring);
}

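/*
 * Reject execbuffer flags we do not understand and require the batch
 * start offset and length to be 8-byte aligned.
 */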
static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);

	for (i = 0; i < count; i++) {
		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
			return -EINVAL;

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915_prefault_disable)) {
			if (fault_in_multipages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}

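/*
 * Look up the hardware context for this submission.  Non-default
 * contexts are only valid on the render ring, and a context that has
 * been banned (see its hang_stats) is refused with -EIO.
 */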
static struct i915_hw_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
			  struct intel_ring_buffer *ring, const u32 ctx_id)
{
	struct i915_hw_context *ctx = NULL;
	struct i915_ctx_hang_stats *hs;

	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
		return ERR_PTR(-EINVAL);

	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
	if (IS_ERR(ctx))
		return ctx;

	hs = &ctx->hang_stats;
	if (hs->banned) {
		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
		return ERR_PTR(-EIO);
	}

	return ctx;
}

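/*
 * After the batch has been dispatched, move every object onto the ring's
 * active list, update its read/write domains and, for objects that were
 * written, record the write seqno and mark any pinned scanout as busy.
 */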
static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
				   struct intel_ring_buffer *ring)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain == 0)
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

		i915_vma_move_to_active(vma, ring);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->last_write_seqno = intel_ring_get_seqno(ring);
			/* check for potential scanout */
			if (i915_gem_obj_ggtt_bound(obj) &&
			    i915_gem_obj_to_ggtt(obj)->pin_count)
				intel_mark_fb_busy(obj, ring);
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

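/*
 * Force a full cache flush and add a request (breadcrumb) so that
 * completion of the batch can be tracked and later retired.
 */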
static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_ring_buffer *ring,
				    struct drm_i915_gem_object *obj)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)__i915_add_request(ring, file, obj, NULL);
}

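/*
 * For I915_EXEC_GEN7_SOL_RESET on the gen7 render ring: emit
 * MI_LOAD_REGISTER_IMM commands that zero the four GEN7_SO_WRITE_OFFSET
 * registers before the batch runs.
 */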
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
		return 0;

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

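/*
 * Core of the execbuffer2 ioctl: validate the arguments, pick the ring
 * and context, look up all objects, reserve and relocate them, flush
 * caches and finally dispatch the batch buffer (the last object in the
 * buffer list) on the ring.
 *
 * Roughly, userspace drives this path with something like the sketch
 * below (illustrative only: error handling is omitted and the handles,
 * sizes and flags are placeholders, not values taken from this file):
 *
 *	struct drm_i915_gem_relocation_entry reloc = {
 *		.target_handle = target_bo_handle,
 *		.offset = offset_of_pointer_inside_batch,
 *		.delta = offset_inside_target,
 *		.read_domains = I915_GEM_DOMAIN_RENDER,
 *	};
 *	struct drm_i915_gem_exec_object2 objs[2] = {
 *		{ .handle = target_bo_handle },
 *		{ .handle = batch_bo_handle,	   (batch goes last)
 *		  .relocation_count = 1,
 *		  .relocs_ptr = (uintptr_t)&reloc },
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (uintptr_t)objs,
 *		.buffer_count = 2,
 *		.batch_len = batch_size_in_bytes,
 *		.flags = I915_EXEC_RENDER,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 */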
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
992
		       struct drm_i915_gem_exec_object2 *exec)
993 994
{
	drm_i915_private_t *dev_priv = dev->dev_private;
995
	struct eb_vmas *eb;
996 997 998
	struct drm_i915_gem_object *batch_obj;
	struct drm_clip_rect *cliprects = NULL;
	struct intel_ring_buffer *ring;
999 1000
	struct i915_hw_context *ctx;
	struct i915_address_space *vm;
1001
	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
1002
	u32 exec_start = args->batch_start_offset, exec_len;
1003
	u32 mask, flags;
1004
	int ret, mode, i;
1005
	bool need_relocs;
1006

1007
	if (!i915_gem_check_execbuffer(args))
1008 1009 1010
		return -EINVAL;

	ret = validate_exec_list(exec, args->buffer_count);
1011 1012 1013
	if (ret)
		return ret;

1014 1015 1016 1017 1018 1019 1020
	flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!file->is_master || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		flags |= I915_DISPATCH_SECURE;
	}
1021 1022
	if (args->flags & I915_EXEC_IS_PINNED)
		flags |= I915_DISPATCH_PINNED;
1023

1024
	if ((args->flags & I915_EXEC_RING_MASK) > I915_NUM_RINGS) {
1025
		DRM_DEBUG("execbuf with unknown ring: %d\n",
1026 1027 1028
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}
1029 1030 1031 1032 1033 1034

	if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
		ring = &dev_priv->ring[RCS];
	else
		ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];

1035 1036 1037 1038 1039
	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}
1040

1041
	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1042
	mask = I915_EXEC_CONSTANTS_MASK;
1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054
	switch (mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (ring == &dev_priv->ring[RCS] &&
		    mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4)
				return -EINVAL;

			if (INTEL_INFO(dev)->gen > 5 &&
			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
				return -EINVAL;
1055 1056 1057 1058

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1059 1060 1061
		}
		break;
	default:
1062
		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
1063 1064 1065
		return -EINVAL;
	}

1066
	if (args->buffer_count < 1) {
1067
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1068 1069 1070 1071
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
1072
		if (ring != &dev_priv->ring[RCS]) {
1073
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
1074 1075 1076
			return -EINVAL;
		}

1077 1078 1079 1080 1081
		if (INTEL_INFO(dev)->gen >= 5) {
			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
			return -EINVAL;
		}

		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			return -EINVAL;
		}

		cliprects = kcalloc(args->num_cliprects,
				    sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		if (copy_from_user(cliprects,
				   to_user_ptr(args->cliprects_ptr),
				   sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto pre_mutex_err;
		}
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->ums.mm_suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		ret = PTR_ERR(ctx);
		goto pre_mutex_err;
	} 

	i915_gem_context_reference(ctx);

	vm = ctx->vm;
	if (!USES_FULL_PPGTT(dev))
		vm = &dev_priv->gtt.base;

	eb = eb_create(args);
	if (eb == NULL) {
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
								eb, exec);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (flags & I915_DISPATCH_SECURE &&
	    !batch_obj->has_global_gtt_mapping) {
		/* When we have multiple VMs, we'll need to make sure that we
		 * allocate space first */
		struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
		BUG_ON(!vma);
		vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
	}

	if (flags & I915_DISPATCH_SECURE)
		exec_start += i915_gem_obj_ggtt_offset(batch_obj);
	else
		exec_start += i915_gem_obj_offset(batch_obj, vm);

	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
	if (ret)
		goto err;

	ret = i915_switch_context(ring, file, ctx);
	if (ret)
		goto err;

	if (ring == &dev_priv->ring[RCS] &&
	    mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto err;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, mask << 16 | mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto err;
	}


	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(dev, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto err;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len,
							flags);
			if (ret)
				goto err;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len,
						flags);
		if (ret)
			goto err;
	}

	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

	i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

err:
	/* the request owns the ref now */
	i915_gem_context_unreference(ctx);
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	kfree(cliprects);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}