/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>

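/*
 * Execbuffer bookkeeping: 'vmas' lists every VMA referenced by the request.
 * Handle-to-vma lookup uses one of two schemes, selected by the sign of
 * 'and': a flat look-up table indexed by buffer position when userspace
 * passed I915_EXEC_HANDLE_LUT ('and' holds the negated buffer count), or a
 * hash table keyed by object handle ('and' holds the bucket mask).
 */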
struct eb_vmas {
	struct list_head vmas;
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};

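/*
 * Allocate the VMA tracking structure. The flat LUT is tried first with a
 * cheap, no-retry allocation; if that fails (or the LUT flag is absent) we
 * fall back to a zeroed hash table whose power-of-two bucket count is
 * shrunk until it is no more than roughly twice the buffer count.
 */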
static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}

static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static int
eb_lookup_vmas(struct eb_vmas *eb,
	       struct drm_i915_gem_exec_object2 *exec,
	       const struct drm_i915_gem_execbuffer2 *args,
	       struct i915_address_space *vm,
	       struct drm_file *file)
{
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret = 0;

	INIT_LIST_HEAD(&objects);
	spin_lock(&file->table_lock);
	/* Grab a reference to the object and release the lock so we can
	 * look up or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				   exec[i].handle, i);
			ret = -ENOENT;
			goto out;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				   obj, exec[i].handle, i);
			ret = -EINVAL;
			goto out;
		}

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&file->table_lock);

	i = 0;
	list_for_each_entry(obj, &objects, obj_exec_link) {
		struct i915_vma *vma;

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
		if (IS_ERR(vma)) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto out;
		}

		list_add_tail(&vma->exec_list, &eb->vmas);

		vma->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}

out:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		if (ret)
			drm_gem_object_unreference(&obj->base);
	}
	return ret;
}

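/* Find the vma for an execbuffer handle via whichever scheme eb_create chose. */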
static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct hlist_node *node;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each(node, head) {
			struct i915_vma *vma;

			vma = hlist_entry(node, struct i915_vma, exec_node);
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void eb_destroy(struct eb_vmas *eb)
{
	while (!list_empty(&eb->vmas)) {
		struct i915_vma *vma;

		vma = list_first_entry(&eb->vmas,
				       struct i915_vma,
				       exec_list);
		list_del_init(&vma->exec_list);
		drm_gem_object_unreference(&vma->obj->base);
	}
	kfree(eb);
}

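/*
 * Relocations are written either through a CPU kmapping or through the
 * GTT. The CPU path is used whenever it is coherent (LLC platforms or a
 * CPU write domain) or a GTT write is unsuitable (unmappable or snooped
 * buffers).
 */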
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (HAS_LLC(obj->base.dev) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		!obj->map_and_fenceable ||
		obj->cache_level != I915_CACHE_NONE);
}

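/*
 * On gen8+ the presumed address is 64 bits wide, so each relocation also
 * writes the upper dword (zero here) and may need to remap when the
 * entry straddles a page boundary.
 */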
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	char *vaddr;
	int ret = -EINVAL;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
				reloc->offset >> PAGE_SHIFT));
	*(uint32_t *)(vaddr + page_offset) = reloc->delta;

	if (INTEL_INFO(dev)->gen >= 8) {
		page_offset = offset_in_page(page_offset + sizeof(uint32_t));

		if (page_offset == 0) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic(i915_gem_object_get_page(obj,
			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
		}

		*(uint32_t *)(vaddr + page_offset) = 0;
	}

	kunmap_atomic(vaddr);

	return 0;
}

static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t __iomem *reloc_entry;
	void __iomem *reloc_page;
	int ret = -EINVAL;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	/* Map the page containing the relocation we're going to perform.  */
	reloc->offset += i915_gem_obj_ggtt_offset(obj);
	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
			reloc->offset & PAGE_MASK);
	reloc_entry = (uint32_t __iomem *)
		(reloc_page + offset_in_page(reloc->offset));
	iowrite32(reloc->delta, reloc_entry);

	if (INTEL_INFO(dev)->gen >= 8) {
		reloc_entry += 1;

		if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
			io_mapping_unmap_atomic(reloc_page);
			reloc_page = io_mapping_map_atomic_wc(
					dev_priv->gtt.mappable,
					reloc->offset + sizeof(uint32_t));
			reloc_entry = reloc_page;
		}

		iowrite32(0, reloc_entry);
	}

	io_mapping_unmap_atomic(reloc_page);

	return 0;
}

static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_vmas *eb,
				   struct drm_i915_gem_relocation_entry *reloc,
				   struct i915_address_space *vm)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint32_t target_offset;
	int ret = -EINVAL;

	/* we already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = target_vma->node.start;

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !target_i915_obj->has_global_gtt_mapping)) {
		i915_gem_gtt_bind_object(target_i915_obj,
					 target_i915_obj->cache_level);
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return ret;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return ret;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && in_atomic())
		return -EFAULT;

	reloc->delta += target_offset;
	if (use_cpu_reloc(obj))
		ret = relocate_entry_cpu(obj, reloc);
	else
		ret = relocate_entry_gtt(obj, reloc);

	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

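/*
 * Fast-path relocation: relocation entries are copied from userspace in
 * stack-sized chunks using the non-faulting (inatomic) accessors, since
 * the caller runs with pagefaults disabled while holding struct_mutex.
 */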
static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
				 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int remain, ret;

	user_relocs = to_user_ptr(entry->relocs_ptr);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
								 vma->vm);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
				      struct eb_vmas *eb,
				      struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
							 vma->vm);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
	struct i915_vma *vma;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. In such a case, the page
	 * fault handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(vma, &eb->vmas, exec_list) {
		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

#define  __EXEC_OBJECT_HAS_PIN (1<<31)
#define  __EXEC_OBJECT_HAS_FENCE (1<<30)

static int
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
		i915_is_ggtt(vma->vm);
}

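/*
 * Pin a single vma for execution: bind it into the address space, grab
 * (and pin) a fence register when the entry needs one on pre-gen4
 * hardware, and make sure the aliasing-ppgtt and global-GTT mappings
 * exist where required.
 */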
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
				struct intel_ring_buffer *ring,
				bool *need_reloc)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	bool need_fence, need_mappable;
	struct drm_i915_gem_object *obj = vma->obj;
	int ret;

	need_fence =
		has_fenced_gpu_access &&
		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
		obj->tiling_mode != I915_TILING_NONE;
	need_mappable = need_fence || need_reloc_mappable(vma);

	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
				  false);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (has_fenced_gpu_access) {
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			ret = i915_gem_object_get_fence(obj);
			if (ret)
				return ret;

			if (i915_gem_object_pin_fence(obj))
				entry->flags |= __EXEC_OBJECT_HAS_FENCE;

			obj->pending_fenced_gpu_access = true;
		}
	}

	/* Ensure ppgtt mapping exists if needed */
	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
				       obj, obj->cache_level);

		obj->has_aliasing_ppgtt_mapping = 1;
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
	    !obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	return 0;
}

static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry;
	struct drm_i915_gem_object *obj = vma->obj;

	if (!drm_mm_node_allocated(&vma->node))
		return;

	entry = vma->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		i915_gem_object_unpin(obj);

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			    struct list_head *vmas,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct i915_address_space *vm;
	struct list_head ordered_vmas;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	if (list_empty(vmas))
		return 0;

	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

	INIT_LIST_HEAD(&ordered_vmas);
	while (!list_empty(vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(vmas, struct i915_vma, exec_list);
		obj = vma->obj;
		entry = vma->exec_entry;

		need_fence =
			has_fenced_gpu_access &&
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (need_mappable)
			list_move(&vma->exec_list, &ordered_vmas);
		else
			list_move_tail(&vma->exec_list, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
		obj->pending_fenced_gpu_access = false;
	}
	list_splice(&ordered_vmas, vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoid unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
			bool need_fence, need_mappable;

			obj = vma->obj;

			if (!drm_mm_node_allocated(&vma->node))
				continue;

			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;
			need_mappable = need_fence || need_reloc_mappable(vma);

			WARN_ON((need_mappable || need_fence) &&
			       !i915_is_ggtt(vma->vm));

			if ((entry->alignment &&
			     vma->node.start & (entry->alignment - 1)) ||
			    (need_mappable && !obj->map_and_fenceable))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, vmas, exec_list) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

err:		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

		if (ret != -ENOSPC || retry++)
			return ret;

		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);
}

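/*
 * Slow-path relocation: drop struct_mutex, copy every relocation array
 * from userspace with pagefaults enabled (invalidating the cached
 * presumed offsets so an interrupted pass cannot be mistaken for a valid
 * one), then retake the lock, look the objects up again and apply the
 * relocations from the kernel copy.
 */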
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_ring_buffer *ring,
				  struct eb_vmas *eb,
				  struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	unsigned count = args->buffer_count;

	if (WARN_ON(list_empty(&eb->vmas)))
		return 0;

	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->vmas)) {
		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
		list_del_init(&vma->exec_list);
		drm_gem_object_unreference(&vma->obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (copy_to_user(&user_relocs[j].presumed_offset,
					 &invalid_offset,
					 sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		int offset = vma->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
							    reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

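/*
 * Put every object into the GPU domain before execution: sync against
 * rings it is still active on, clflush CPU-dirty pages, and invalidate
 * the GPU caches so the batch sees coherent data.
 */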
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
				struct list_head *vmas)
{
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_chipset)
		i915_gem_chipset_flush(ring->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(ring);
}

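/* Reject unknown flags and batches whose start or length is not 8-byte aligned. */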
static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);

	for (i = 0; i < count; i++) {
		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
			return -EINVAL;

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915_prefault_disable)) {
			if (fault_in_multipages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}

static int
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
			  const u32 ctx_id)
{
	struct i915_ctx_hang_stats *hs;

	hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
	if (IS_ERR(hs))
		return PTR_ERR(hs);

	if (hs->banned) {
		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
		return -EIO;
	}

	return 0;
}

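/*
 * After dispatch, fold the pending read/write domains back into each
 * object and move every vma onto the ring's active list so retirement
 * can track completion.
 */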
static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
				   struct intel_ring_buffer *ring)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain == 0)
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

		i915_vma_move_to_active(vma, ring);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->last_write_seqno = intel_ring_get_seqno(ring);
			if (obj->pin_count) /* check for potential scanout */
				intel_mark_fb_busy(obj, ring);
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_ring_buffer *ring,
				    struct drm_i915_gem_object *obj)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)__i915_add_request(ring, file, obj, NULL);
}

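/*
 * I915_EXEC_GEN7_SOL_RESET: zero the four gen7 streamout write-offset
 * registers with MI_LOAD_REGISTER_IMM before the batch runs (render
 * ring only).
 */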
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
		return 0;

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

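/*
 * Main execbuffer path: validate the request, look up and reserve every
 * buffer, apply relocations (falling back to the slow path on -EFAULT),
 * flush caches, switch context, and dispatch the batch on the chosen
 * ring.
 */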
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec,
		       struct i915_address_space *vm)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct eb_vmas *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_clip_rect *cliprects = NULL;
	struct intel_ring_buffer *ring;
	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 exec_start, exec_len;
	u32 mask, flags;
	int ret, mode, i;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(exec, args->buffer_count);
	if (ret)
		return ret;

	flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!file->is_master || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		flags |= I915_DISPATCH_PINNED;

	switch (args->flags & I915_EXEC_RING_MASK) {
	case I915_EXEC_DEFAULT:
	case I915_EXEC_RENDER:
		ring = &dev_priv->ring[RCS];
		break;
	case I915_EXEC_BSD:
		ring = &dev_priv->ring[VCS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	case I915_EXEC_BLT:
		ring = &dev_priv->ring[BCS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	case I915_EXEC_VEBOX:
		ring = &dev_priv->ring[VECS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;

	default:
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}
	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	mask = I915_EXEC_CONSTANTS_MASK;
	switch (mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (ring == &dev_priv->ring[RCS] &&
		    mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4)
				return -EINVAL;

			if (INTEL_INFO(dev)->gen > 5 &&
			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
				return -EINVAL;

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}

		if (INTEL_INFO(dev)->gen >= 5) {
			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
			return -EINVAL;
		}

		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			return -EINVAL;
		}

		cliprects = kcalloc(args->num_cliprects,
				    sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		if (copy_from_user(cliprects,
				   to_user_ptr(args->cliprects_ptr),
				   sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto pre_mutex_err;
		}
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->ums.mm_suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	ret = i915_gem_validate_context(dev, file, ctx_id);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		goto pre_mutex_err;
	}

	eb = eb_create(args);
	if (eb == NULL) {
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
								eb, exec);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);

	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
	if (ret)
		goto err;

	ret = i915_switch_context(ring, file, ctx_id);
	if (ret)
		goto err;

	if (ring == &dev_priv->ring[RCS] &&
	    mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto err;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, mask << 16 | mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto err;
	}

	exec_start = i915_gem_obj_offset(batch_obj, vm) +
		args->batch_start_offset;
	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(dev, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto err;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len,
							flags);
			if (ret)
				goto err;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len,
						flags);
		if (ret)
			goto err;
	}

	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

	i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

err:
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	kfree(cliprects);

	/* intel_gpu_busy should also get a ref, so it will free when the device
	 * is really idle. */
	intel_runtime_pm_put(dev_priv);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
				     &dev_priv->gtt.base);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
				     &dev_priv->gtt.base);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}