/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>

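/* Tracks the vmas referenced by one execbuffer call. "and" doubles as the
 * lookup strategy selector: a negative value (-buffer_count) means handles
 * index the flat lut[] directly, while a non-negative value is the hash
 * mask for the buckets[] chains. */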
struct eb_vmas {
	struct list_head vmas;
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};

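/* Allocate the vma tracker. With I915_EXEC_HANDLE_LUT userspace addresses
 * relocation targets by their index in the exec list, so a flat lookup
 * table suffices; otherwise (or if that allocation fails) fall back to a
 * hash table sized from the buffer count. */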
static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}

static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

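/* Resolve every exec object handle to a vma in two passes: first take
 * object references under the file's table_lock, then look up or create
 * the vmas without holding the spinlock (vma creation can sleep). */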
static int
eb_lookup_vmas(struct eb_vmas *eb,
	       struct drm_i915_gem_exec_object2 *exec,
	       const struct drm_i915_gem_execbuffer2 *args,
	       struct i915_address_space *vm,
	       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret = 0;

	INIT_LIST_HEAD(&objects);
	spin_lock(&file->table_lock);
	/* Grab a reference to the object and release the lock so we can lookup
	 * or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				   exec[i].handle, i);
			ret = -ENOENT;
			goto out;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				   obj, exec[i].handle, i);
			ret = -EINVAL;
			goto out;
		}

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&file->table_lock);

	i = 0;
	list_for_each_entry(obj, &objects, obj_exec_link) {
		struct i915_vma *vma;
		struct i915_address_space *bind_vm = vm;

		/* If we have secure dispatch, or the userspace assures us that
		 * they know what they're doing, use the GGTT VM.
		 */
		if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT ||
		    ((args->flags & I915_EXEC_SECURE) &&
		    (i == (args->buffer_count - 1))))
			bind_vm = &dev_priv->gtt.base;

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
		if (IS_ERR(vma)) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto out;
		}

		list_add_tail(&vma->exec_list, &eb->vmas);

		vma->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}

out:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		if (ret)
			drm_gem_object_unreference(&obj->base);
	}
	return ret;
}

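/* Map a relocation target handle back to its vma, either by direct index
 * into the lut or by walking the matching hash bucket. */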
static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct hlist_node *node;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each(node, head) {
			struct i915_vma *vma;

			vma = hlist_entry(node, struct i915_vma, exec_node);
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void eb_destroy(struct eb_vmas *eb)
{
	while (!list_empty(&eb->vmas)) {
		struct i915_vma *vma;

		vma = list_first_entry(&eb->vmas,
				       struct i915_vma,
				       exec_list);
		list_del_init(&vma->exec_list);
		drm_gem_object_unreference(&vma->obj->base);
	}
	kfree(eb);
}

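/* Writing relocations through the CPU is preferred when the object is
 * LLC-coherent, already in the CPU write domain, not mappable through the
 * aperture, or snooped; otherwise we go through the GTT. */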
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (HAS_LLC(obj->base.dev) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		!obj->map_and_fenceable ||
		obj->cache_level != I915_CACHE_NONE);
}

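/* Perform a relocation write via a kmap of the object's backing page.
 * On gen8+ the relocated address is 64 bits wide, so also clear the
 * upper dword, crossing a page boundary if necessary. */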
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	char *vaddr;
	int ret = -EINVAL;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
				reloc->offset >> PAGE_SHIFT));
	*(uint32_t *)(vaddr + page_offset) = reloc->delta;

	if (INTEL_INFO(dev)->gen >= 8) {
		page_offset = offset_in_page(page_offset + sizeof(uint32_t));

		if (page_offset == 0) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic(i915_gem_object_get_page(obj,
			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
		}

		*(uint32_t *)(vaddr + page_offset) = 0;
	}

	kunmap_atomic(vaddr);

	return 0;
}

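/* As above, but write the relocation through an atomic WC mapping of the
 * GTT aperture; used when a CPU write would be incoherent or impossible. */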
static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t __iomem *reloc_entry;
	void __iomem *reloc_page;
	int ret = -EINVAL;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	/* Map the page containing the relocation we're going to perform.  */
	reloc->offset += i915_gem_obj_ggtt_offset(obj);
	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
			reloc->offset & PAGE_MASK);
	reloc_entry = (uint32_t __iomem *)
		(reloc_page + offset_in_page(reloc->offset));
	iowrite32(reloc->delta, reloc_entry);

	if (INTEL_INFO(dev)->gen >= 8) {
		reloc_entry += 1;

		if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
			io_mapping_unmap_atomic(reloc_page);
			reloc_page = io_mapping_map_atomic_wc(
					dev_priv->gtt.mappable,
					reloc->offset + sizeof(uint32_t));
			reloc_entry = reloc_page;
		}

		iowrite32(0, reloc_entry);
	}

	io_mapping_unmap_atomic(reloc_page);

	return 0;
}

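/* Apply a single relocation: validate the target handle and domains, then
 * rewrite the instruction with the target's current offset and update the
 * user's presumed_offset. Runs with pagefaults disabled on the fast path,
 * so a fault here is reported as -EFAULT for the slow path to handle. */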
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_vmas *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint32_t target_offset;
	int ret = -EINVAL;

	/* we already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = target_vma->node.start;

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !target_i915_obj->has_global_gtt_mapping)) {
		struct i915_vma *vma =
			list_first_entry(&target_i915_obj->vma_list,
					 typeof(*vma), vma_link);
		vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return ret;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return ret;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && in_atomic())
		return -EFAULT;

	reloc->delta += target_offset;
	if (use_cpu_reloc(obj))
		ret = relocate_entry_cpu(obj, reloc);
	else
		ret = relocate_entry_gtt(obj, reloc);

	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

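/* Process all relocations for one vma, copying them from userspace in
 * stack-sized batches and writing back any presumed_offset that changed. */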
static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
				 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int remain, ret;

	user_relocs = to_user_ptr(entry->relocs_ptr);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}

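/* Slow-path variant: the relocations have already been copied into a
 * kernel buffer, so they can be applied without touching userspace. */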
static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
				      struct eb_vmas *eb,
				      struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
	struct i915_vma *vma;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. For in such a case, the page
	 * fault handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(vma, &eb->vmas, exec_list) {
		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

#define  __EXEC_OBJECT_HAS_PIN (1<<31)
#define  __EXEC_OBJECT_HAS_FENCE (1<<30)

static int
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
		i915_is_ggtt(vma->vm);
}

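/* Pin one vma into place for execution, grabbing a fence register if the
 * entry needs one, and note whether its offset changed so that the
 * relocation pass knows it has work to do. */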
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
				struct intel_ring_buffer *ring,
				bool *need_reloc)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	bool need_fence, need_mappable;
	u32 flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
		!vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
	int ret;

	need_fence =
		has_fenced_gpu_access &&
		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
		obj->tiling_mode != I915_TILING_NONE;
	need_mappable = need_fence || need_reloc_mappable(vma);

	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
				  false);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (has_fenced_gpu_access) {
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			ret = i915_gem_object_get_fence(obj);
			if (ret)
				return ret;

			if (i915_gem_object_pin_fence(obj))
				entry->flags |= __EXEC_OBJECT_HAS_FENCE;

			obj->pending_fenced_gpu_access = true;
		}
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	vma->bind_vma(vma, obj->cache_level, flags);

	return 0;
}

static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry;
	struct drm_i915_gem_object *obj = vma->obj;

	if (!drm_mm_node_allocated(&vma->node))
		return;

	entry = vma->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		vma->pin_count--;

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

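/* Bind every vma for this execbuffer, mappable/fenceable objects first,
 * retrying after a full VM eviction if we run out of space. */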
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			    struct list_head *vmas,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct i915_address_space *vm;
	struct list_head ordered_vmas;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	if (list_empty(vmas))
		return 0;

	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

	INIT_LIST_HEAD(&ordered_vmas);
	while (!list_empty(vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(vmas, struct i915_vma, exec_list);
		obj = vma->obj;
		entry = vma->exec_entry;

		need_fence =
			has_fenced_gpu_access &&
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (need_mappable)
			list_move(&vma->exec_list, &ordered_vmas);
		else
			list_move_tail(&vma->exec_list, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
		obj->pending_fenced_gpu_access = false;
	}
	list_splice(&ordered_vmas, vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
			bool need_fence, need_mappable;

			obj = vma->obj;

			if (!drm_mm_node_allocated(&vma->node))
				continue;

			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;
			need_mappable = need_fence || need_reloc_mappable(vma);

			WARN_ON((need_mappable || need_fence) &&
			       !i915_is_ggtt(vma->vm));

			if ((entry->alignment &&
			     vma->node.start & (entry->alignment - 1)) ||
			    (need_mappable && !obj->map_and_fenceable))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, vmas, exec_list) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

err:		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

		if (ret != -ENOSPC || retry++)
			return ret;

		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);
}

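/* Last-ditch relocation path: drop the objects and struct_mutex, copy all
 * relocation entries into the kernel with pagefaults enabled, then retake
 * the lock, re-acquire the objects and apply the relocations from the
 * kernel copy. */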
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_ring_buffer *ring,
				  struct eb_vmas *eb,
				  struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	unsigned count = args->buffer_count;

	if (WARN_ON(list_empty(&eb->vmas)))
		return 0;

	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->vmas)) {
		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
		list_del_init(&vma->exec_list);
		drm_gem_object_unreference(&vma->obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (copy_to_user(&user_relocs[j].presumed_offset,
					 &invalid_offset,
					 sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		int offset = vma->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
							    reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

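/* Flush CPU and chipset writes for every object and sync against rings
 * that are still using them, so the batch sees coherent data. */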
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
				struct list_head *vmas)
{
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_chipset)
		i915_gem_chipset_flush(ring->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(ring);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

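/* Sanity-check the user-supplied exec list before any locks are taken:
 * reject unknown flags, guard against relocation-count overflow and make
 * sure each relocation array is writable (presumed offsets are written
 * back). */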
static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);

	for (i = 0; i < count; i++) {
		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
			return -EINVAL;

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915_prefault_disable)) {
			if (fault_in_multipages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}

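/* Look up the context for this submission and refuse it if the context
 * has been banned for causing too many hangs. */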
static struct i915_hw_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
			  const u32 ctx_id)
{
	struct i915_hw_context *ctx = NULL;
	struct i915_ctx_hang_stats *hs;

	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
	if (IS_ERR_OR_NULL(ctx))
		return ctx;

	hs = &ctx->hang_stats;
	if (hs->banned) {
		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
		return ERR_PTR(-EIO);
	}

	return ctx;
}

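/* Commit the pending read/write domains and put every object onto the
 * ring's active list, marking dirty pages and busy scanouts as we go. */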
static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
				   struct intel_ring_buffer *ring)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain == 0)
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

		i915_vma_move_to_active(vma, ring);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->last_write_seqno = intel_ring_get_seqno(ring);
			/* check for potential scanout */
			if (i915_gem_obj_ggtt_bound(obj) &&
			    i915_gem_obj_to_ggtt(obj)->pin_count)
				intel_mark_fb_busy(obj, ring);
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_ring_buffer *ring,
				    struct drm_i915_gem_object *obj)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)__i915_add_request(ring, file, obj, NULL);
}

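/* Gen7 only: zero the four GEN7_SO_WRITE_OFFSET registers on the render
 * ring when userspace asks for it via I915_EXEC_GEN7_SOL_RESET. */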
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
		return 0;

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

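/* Common execbuffer implementation backing both ioctl entry points:
 * validate the arguments, look up and reserve the objects, apply
 * relocations, flush caches, then dispatch the batch and retire it. */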
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct eb_vmas *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_clip_rect *cliprects = NULL;
	struct intel_ring_buffer *ring;
	struct i915_hw_context *ctx;
	struct i915_address_space *vm;
	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 exec_start, exec_len;
	u32 mask, flags;
	int ret, mode, i;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(exec, args->buffer_count);
	if (ret)
		return ret;

	flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!file->is_master || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		flags |= I915_DISPATCH_PINNED;

	if ((args->flags & I915_EXEC_RING_MASK) > I915_NUM_RINGS) {
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
		ring = &dev_priv->ring[RCS];
	else
		ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];

	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	mask = I915_EXEC_CONSTANTS_MASK;
	switch (mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (ring == &dev_priv->ring[RCS] &&
		    mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4)
				return -EINVAL;

			if (INTEL_INFO(dev)->gen > 5 &&
			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
				return -EINVAL;

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}

		if (INTEL_INFO(dev)->gen >= 5) {
			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
			return -EINVAL;
		}

		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			return -EINVAL;
		}

		cliprects = kcalloc(args->num_cliprects,
				    sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		if (copy_from_user(cliprects,
				   to_user_ptr(args->cliprects_ptr),
				   sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto pre_mutex_err;
		}
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->ums.mm_suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	ctx = i915_gem_validate_context(dev, file, ctx_id);
	if (IS_ERR_OR_NULL(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		ret = PTR_ERR(ctx);
		goto pre_mutex_err;
	}

	i915_gem_context_reference(ctx);

	/* HACK until we have full PPGTT */
	/* vm = ctx->vm; */
	vm = &dev_priv->gtt.base;

	eb = eb_create(args);
	if (eb == NULL) {
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
								eb, exec);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (flags & I915_DISPATCH_SECURE &&
	    !batch_obj->has_global_gtt_mapping) {
		/* When we have multiple VMs, we'll need to make sure that we
		 * allocate space first */
		struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
		BUG_ON(!vma);
		vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
	}

	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
	if (ret)
		goto err;

	ret = i915_switch_context(ring, file, ctx);
	if (ret)
		goto err;

	if (ring == &dev_priv->ring[RCS] &&
	    mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto err;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, mask << 16 | mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto err;
	}

	exec_start = i915_gem_obj_offset(batch_obj, vm) +
		args->batch_start_offset;
	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(dev, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto err;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len,
							flags);
			if (ret)
				goto err;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len,
						flags);
		if (ret)
			goto err;
	}

	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

	i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

err:
	/* the request owns the ref now */
	i915_gem_context_unreference(ctx);
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	kfree(cliprects);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}