/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>

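/* Driver-internal bookkeeping flags kept in the high bits of
 * exec_entry->flags; they are never set by userspace and are cleared
 * again by i915_gem_execbuffer_unreserve_vma().
 */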
#define  __EXEC_OBJECT_HAS_PIN (1<<31)
#define  __EXEC_OBJECT_HAS_FENCE (1<<30)

struct eb_vmas {
	struct list_head vmas;
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};

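/*
 * When userspace passes I915_EXEC_HANDLE_LUT the exec list is indexed
 * directly (eb->and is negative and holds -buffer_count); otherwise
 * handles are hashed into eb->buckets and eb->and is the bucket mask.
 */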
static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}

static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static int
eb_lookup_vmas(struct eb_vmas *eb,
	       struct drm_i915_gem_exec_object2 *exec,
	       const struct drm_i915_gem_execbuffer2 *args,
	       struct i915_address_space *vm,
	       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret = 0;

	INIT_LIST_HEAD(&objects);
	spin_lock(&file->table_lock);
	/* Grab a reference to the object and release the lock so we can lookup
	 * or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				   exec[i].handle, i);
			ret = -ENOENT;
			goto out;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				   obj, exec[i].handle, i);
			ret = -EINVAL;
			goto out;
		}

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&file->table_lock);

	i = 0;
	list_for_each_entry(obj, &objects, obj_exec_link) {
		struct i915_vma *vma;
		struct i915_address_space *bind_vm = vm;

		if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
		    USES_FULL_PPGTT(vm->dev)) {
			ret = -EINVAL;
			goto out;
		}

		/* If we have secure dispatch, or the userspace assures us that
		 * they know what they're doing, use the GGTT VM.
		 */
		if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT ||
		    ((args->flags & I915_EXEC_SECURE) &&
		    (i == (args->buffer_count - 1))))
			bind_vm = &dev_priv->gtt.base;

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
		if (IS_ERR(vma)) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto out;
		}

		list_add_tail(&vma->exec_list, &eb->vmas);

		vma->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}


out:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		if (ret)
			drm_gem_object_unreference(&obj->base);
	}
	return ret;
}

static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct hlist_node *node;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each(node, head) {
			struct i915_vma *vma;

			vma = hlist_entry(node, struct i915_vma, exec_node);
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry;
	struct drm_i915_gem_object *obj = vma->obj;

	if (!drm_mm_node_allocated(&vma->node))
		return;

	entry = vma->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		vma->pin_count--;

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void eb_destroy(struct eb_vmas *eb)
{
	while (!list_empty(&eb->vmas)) {
		struct i915_vma *vma;

		vma = list_first_entry(&eb->vmas,
				       struct i915_vma,
				       exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}
	kfree(eb);
}

static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (HAS_LLC(obj->base.dev) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		!obj->map_and_fenceable ||
		obj->cache_level != I915_CACHE_NONE);
}

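/*
 * A relocation can be written either through a kmap of the backing page
 * (relocate_entry_cpu) or through an atomic GTT mapping
 * (relocate_entry_gtt). On gen8+ addresses are 64 bits wide, so the
 * upper dword (always zero here) is written as well, crossing a page
 * boundary if necessary.
 */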
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	char *vaddr;
	int ret = -EINVAL;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
				reloc->offset >> PAGE_SHIFT));
	*(uint32_t *)(vaddr + page_offset) = reloc->delta;

	if (INTEL_INFO(dev)->gen >= 8) {
		page_offset = offset_in_page(page_offset + sizeof(uint32_t));

		if (page_offset == 0) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic(i915_gem_object_get_page(obj,
			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
		}

		*(uint32_t *)(vaddr + page_offset) = 0;
	}

	kunmap_atomic(vaddr);

	return 0;
}

static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t __iomem *reloc_entry;
	void __iomem *reloc_page;
	int ret = -EINVAL;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	/* Map the page containing the relocation we're going to perform.  */
	reloc->offset += i915_gem_obj_ggtt_offset(obj);
	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
			reloc->offset & PAGE_MASK);
	reloc_entry = (uint32_t __iomem *)
		(reloc_page + offset_in_page(reloc->offset));
	iowrite32(reloc->delta, reloc_entry);

	if (INTEL_INFO(dev)->gen >= 8) {
		reloc_entry += 1;

		if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
			io_mapping_unmap_atomic(reloc_page);
			reloc_page = io_mapping_map_atomic_wc(
					dev_priv->gtt.mappable,
					reloc->offset + sizeof(uint32_t));
			reloc_entry = reloc_page;
		}

		iowrite32(0, reloc_entry);
	}

	io_mapping_unmap_atomic(reloc_page);

	return 0;
}

static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_vmas *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint32_t target_offset;
	int ret = -EINVAL;

	/* we already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = target_vma->node.start;

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !target_i915_obj->has_global_gtt_mapping)) {
		struct i915_vma *vma =
			list_first_entry(&target_i915_obj->vma_list,
					 typeof(*vma), vma_link);
		vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return ret;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return ret;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && in_atomic())
		return -EFAULT;

	reloc->delta += target_offset;
	if (use_cpu_reloc(obj))
		ret = relocate_entry_cpu(obj, reloc);
	else
		ret = relocate_entry_gtt(obj, reloc);

	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
				 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int remain, ret;

	user_relocs = to_user_ptr(entry->relocs_ptr);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
				      struct eb_vmas *eb,
				      struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
	struct i915_vma *vma;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. For in such a case we, the page
	 * fault handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(vma, &eb->vmas, exec_list) {
		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

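/*
 * Objects whose relocations have to be written through the GTT (rather
 * than the CPU) need to be bound within the mappable aperture; this can
 * only apply to GGTT vmas.
 */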
static int
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
		i915_is_ggtt(vma->vm);
}

static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
				struct intel_ring_buffer *ring,
				bool *need_reloc)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	bool need_fence, need_mappable;
	u32 flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
		!vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
	int ret;

	need_fence =
		has_fenced_gpu_access &&
		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
		obj->tiling_mode != I915_TILING_NONE;
	need_mappable = need_fence || need_reloc_mappable(vma);

	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
				  false);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (has_fenced_gpu_access) {
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			ret = i915_gem_object_get_fence(obj);
			if (ret)
				return ret;

			if (i915_gem_object_pin_fence(obj))
				entry->flags |= __EXEC_OBJECT_HAS_FENCE;

			obj->pending_fenced_gpu_access = true;
		}
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	vma->bind_vma(vma, obj->cache_level, flags);

	return 0;
}

static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			    struct list_head *vmas,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct i915_address_space *vm;
	struct list_head ordered_vmas;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	if (list_empty(vmas))
		return 0;

	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

	INIT_LIST_HEAD(&ordered_vmas);
	while (!list_empty(vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(vmas, struct i915_vma, exec_list);
		obj = vma->obj;
		entry = vma->exec_entry;

		need_fence =
			has_fenced_gpu_access &&
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (need_mappable)
			list_move(&vma->exec_list, &ordered_vmas);
		else
			list_move_tail(&vma->exec_list, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
		obj->pending_fenced_gpu_access = false;
	}
	list_splice(&ordered_vmas, vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
			bool need_fence, need_mappable;

			obj = vma->obj;

			if (!drm_mm_node_allocated(&vma->node))
				continue;

			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;
			need_mappable = need_fence || need_reloc_mappable(vma);

			WARN_ON((need_mappable || need_fence) &&
			       !i915_is_ggtt(vma->vm));

			if ((entry->alignment &&
			     vma->node.start & (entry->alignment - 1)) ||
			    (need_mappable && !obj->map_and_fenceable))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, vmas, exec_list) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

err:
		if (ret != -ENOSPC || retry++)
			return ret;

		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);
}

static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_ring_buffer *ring,
				  struct eb_vmas *eb,
				  struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	unsigned count = args->buffer_count;

	if (WARN_ON(list_empty(&eb->vmas)))
		return 0;

	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->vmas)) {
		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (copy_to_user(&user_relocs[j].presumed_offset,
					 &invalid_offset,
					 sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		int offset = vma->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
							    reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
				struct list_head *vmas)
{
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_chipset)
		i915_gem_chipset_flush(ring->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(ring);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);

	for (i = 0; i < count; i++) {
		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
			return -EINVAL;

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915_prefault_disable)) {
			if (fault_in_multipages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}

static struct i915_hw_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
			  struct intel_ring_buffer *ring, const u32 ctx_id)
{
	struct i915_hw_context *ctx = NULL;
	struct i915_ctx_hang_stats *hs;

	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
		return ERR_PTR(-EINVAL);

	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
	if (IS_ERR_OR_NULL(ctx))
		return ctx;

	hs = &ctx->hang_stats;
	if (hs->banned) {
		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
		return ERR_PTR(-EIO);
	}

	return ctx;
}

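/*
 * Mark every buffer on the exec list as active on the ring and update
 * its read/write domains so retirement and domain tracking see the
 * batch's accesses.
 */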
static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
				   struct intel_ring_buffer *ring)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain == 0)
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

		i915_vma_move_to_active(vma, ring);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->last_write_seqno = intel_ring_get_seqno(ring);
			/* check for potential scanout */
			if (i915_gem_obj_ggtt_bound(obj) &&
			    i915_gem_obj_to_ggtt(obj)->pin_count)
				intel_mark_fb_busy(obj, ring);
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_ring_buffer *ring,
				    struct drm_i915_gem_object *obj)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)__i915_add_request(ring, file, obj, NULL);
}

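/*
 * I915_EXEC_GEN7_SOL_RESET asks the kernel to zero the four
 * GEN7_SO_WRITE_OFFSET registers (streamed-output write offsets) via
 * MI_LOAD_REGISTER_IMM before the batch runs; only the gen7 render
 * ring honours this.
 */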
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
		return 0;

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct eb_vmas *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_clip_rect *cliprects = NULL;
	struct intel_ring_buffer *ring;
	struct i915_hw_context *ctx;
	struct i915_address_space *vm;
	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 exec_start = args->batch_start_offset, exec_len;
	u32 mask, flags;
	int ret, mode, i;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(exec, args->buffer_count);
	if (ret)
		return ret;

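	/* I915_EXEC_SECURE requests a privileged batch executed from the
	 * global GTT; it is only allowed for the DRM master with
	 * CAP_SYS_ADMIN. */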
	flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!file->is_master || !capable(CAP_SYS_ADMIN))
		    return -EPERM;

		flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		flags |= I915_DISPATCH_PINNED;

	if ((args->flags & I915_EXEC_RING_MASK) > I915_NUM_RINGS) {
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
		ring = &dev_priv->ring[RCS];
	else
		ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];

	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	mask = I915_EXEC_CONSTANTS_MASK;
	switch (mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (ring == &dev_priv->ring[RCS] &&
		    mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4)
				return -EINVAL;

			if (INTEL_INFO(dev)->gen > 5 &&
			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
				return -EINVAL;

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}

		if (INTEL_INFO(dev)->gen >= 5) {
			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
			return -EINVAL;
		}

		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			return -EINVAL;
		}

		cliprects = kcalloc(args->num_cliprects,
				    sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		if (copy_from_user(cliprects,
				   to_user_ptr(args->cliprects_ptr),
				   sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto pre_mutex_err;
		}
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->ums.mm_suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
	if (IS_ERR_OR_NULL(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		ret = PTR_ERR(ctx);
		goto pre_mutex_err;
	}

	i915_gem_context_reference(ctx);

	vm = ctx->vm;
	if (!USES_FULL_PPGTT(dev))
		vm = &dev_priv->gtt.base;

	eb = eb_create(args);
	if (eb == NULL) {
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
								eb, exec);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (flags & I915_DISPATCH_SECURE &&
	    !batch_obj->has_global_gtt_mapping) {
		/* When we have multiple VMs, we'll need to make sure that we
		 * allocate space first */
		struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
		BUG_ON(!vma);
		vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
	}

	if (flags & I915_DISPATCH_SECURE)
		exec_start += i915_gem_obj_ggtt_offset(batch_obj);
	else
		exec_start += i915_gem_obj_offset(batch_obj, vm);

	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
	if (ret)
		goto err;

	ret = i915_switch_context(ring, file, ctx);
	if (ret)
		goto err;

	if (ring == &dev_priv->ring[RCS] &&
	    mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
				goto err;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, mask << 16 | mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto err;
	}


	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(dev, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto err;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len,
							flags);
			if (ret)
				goto err;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len,
						flags);
		if (ret)
			goto err;
	}

	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

	i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

err:
	/* the request owns the ref now */
	i915_gem_context_unreference(ctx);
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	kfree(cliprects);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}