/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>

#define  __EXEC_OBJECT_HAS_PIN (1<<31)
#define  __EXEC_OBJECT_HAS_FENCE (1<<30)

struct eb_vmas {
	struct list_head vmas;
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};

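/*
 * Allocate the per-execbuffer bookkeeping structure. With
 * I915_EXEC_HANDLE_LUT the handles are dense indices, so a flat lookup
 * table suffices; otherwise use a hash table whose bucket count is scaled
 * down from half a page to fit the number of buffers.
 */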
static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}

static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

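/*
 * Resolve every handle in the exec list to a GEM object under the file's
 * table_lock, then, with the lock dropped, look up or create a vma for each
 * object in the target address space and move it onto the eb->vmas list.
 */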
static int
eb_lookup_vmas(struct eb_vmas *eb,
	       struct drm_i915_gem_exec_object2 *exec,
	       const struct drm_i915_gem_execbuffer2 *args,
	       struct i915_address_space *vm,
	       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret;

	INIT_LIST_HEAD(&objects);
	spin_lock(&file->table_lock);
	/* Grab a reference to the object and release the lock so we can lookup
	 * or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				   exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				   obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&file->table_lock);

	i = 0;
	while (!list_empty(&objects)) {
		struct i915_vma *vma;
		struct i915_address_space *bind_vm = vm;

		if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
		    USES_FULL_PPGTT(vm->dev)) {
			ret = -EINVAL;
			goto out;
		}

		/* If we have secure dispatch, or the userspace assures us that
		 * they know what they're doing, use the GGTT VM.
		 */
		if (((args->flags & I915_EXEC_SECURE) &&
		    (i == (args->buffer_count - 1))))
			bind_vm = &dev_priv->gtt.base;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
		if (IS_ERR(vma)) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto err;
		}

		/* Transfer ownership from the objects list to the vmas list. */
		list_add_tail(&vma->exec_list, &eb->vmas);
		list_del_init(&obj->obj_exec_link);

		vma->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}

	return 0;


err:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		drm_gem_object_unreference(&obj->base);
	}
	/*
	 * Objects already transferred to the vmas list will be unreferenced by
	 * eb_destroy.
	 */

	return ret;
}

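/* Map an execbuffer handle back to its vma, via the LUT or the hash table. */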
static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct hlist_node *node;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each(node, head) {
			struct i915_vma *vma;

			vma = hlist_entry(node, struct i915_vma, exec_node);
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry;
	struct drm_i915_gem_object *obj = vma->obj;

	if (!drm_mm_node_allocated(&vma->node))
		return;

	entry = vma->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		vma->pin_count--;

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void eb_destroy(struct eb_vmas *eb)
{
	while (!list_empty(&eb->vmas)) {
		struct i915_vma *vma;

		vma = list_first_entry(&eb->vmas,
				       struct i915_vma,
				       exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}
	kfree(eb);
}

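/*
 * Use the CPU path for relocations when the write would be coherent anyway
 * (LLC, CPU write domain, cached object) or the object cannot be reached
 * through the mappable aperture.
 */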
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (HAS_LLC(obj->base.dev) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		!obj->map_and_fenceable ||
		obj->cache_level != I915_CACHE_NONE);
}

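/*
 * Write a relocation through a CPU kmapping: move the object to the CPU
 * write domain and poke the relocation value into the batch. On gen8+ the
 * address is 64 bits wide, so the following dword is cleared as well.
 */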
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	char *vaddr;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
				reloc->offset >> PAGE_SHIFT));
	*(uint32_t *)(vaddr + page_offset) = reloc->delta;

	if (INTEL_INFO(dev)->gen >= 8) {
		page_offset = offset_in_page(page_offset + sizeof(uint32_t));

		if (page_offset == 0) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic(i915_gem_object_get_page(obj,
			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
		}

		*(uint32_t *)(vaddr + page_offset) = 0;
	}

	kunmap_atomic(vaddr);

	return 0;
}

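/*
 * Write a relocation through an atomic WC mapping of the global GTT: flush
 * the object to the GTT domain, drop any fence, then write the value (and
 * the upper dword on gen8+) through the aperture.
 */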
static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t __iomem *reloc_entry;
	void __iomem *reloc_page;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	/* Map the page containing the relocation we're going to perform.  */
	reloc->offset += i915_gem_obj_ggtt_offset(obj);
	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
			reloc->offset & PAGE_MASK);
	reloc_entry = (uint32_t __iomem *)
		(reloc_page + offset_in_page(reloc->offset));
	iowrite32(reloc->delta, reloc_entry);

	if (INTEL_INFO(dev)->gen >= 8) {
		reloc_entry += 1;

		if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
			io_mapping_unmap_atomic(reloc_page);
			reloc_page = io_mapping_map_atomic_wc(
					dev_priv->gtt.mappable,
					reloc->offset + sizeof(uint32_t));
			reloc_entry = reloc_page;
		}

		iowrite32(0, reloc_entry);
	}

	io_mapping_unmap_atomic(reloc_page);

	return 0;
}

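/*
 * Apply a single relocation entry: validate the target handle, GPU domains,
 * offset and alignment, patch the batch via the CPU or GTT path, and report
 * the offset actually used back through reloc->presumed_offset.
 */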
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_vmas *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint32_t target_offset;
	int ret;

	/* we already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = target_vma->node.start;

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !target_i915_obj->has_global_gtt_mapping)) {
		struct i915_vma *vma =
			list_first_entry(&target_i915_obj->vma_list,
					 typeof(*vma), vma_link);
		vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return -EINVAL;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && in_atomic())
		return -EFAULT;

	reloc->delta += target_offset;
	if (use_cpu_reloc(obj))
		ret = relocate_entry_cpu(obj, reloc);
	else
		ret = relocate_entry_gtt(obj, reloc);

	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

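/*
 * Fast-path relocation processing for one vma: copy the user relocation
 * entries onto the stack in small batches with the in-atomic uaccess
 * helpers and write any changed presumed offsets straight back to
 * userspace.
 */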
static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
				 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int remain, ret;

	user_relocs = to_user_ptr(entry->relocs_ptr);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}

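/*
 * Slow-path variant: the relocation entries have already been copied into
 * kernel memory, so they can simply be applied one by one.
 */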
static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
				      struct eb_vmas *eb,
				      struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
	struct i915_vma *vma;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. For in such a case we, the page
	 * fault handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(vma, &eb->vmas, exec_list) {
		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

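/*
 * Relocations performed through the GTT require the vma to be placed within
 * the mappable aperture; this only applies to global GTT bindings.
 */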
static int
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
		i915_is_ggtt(vma->vm);
}

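/*
 * Pin a single vma for execution: reserve space (mappable if a fence or GTT
 * relocation demands it), grab a fence register when requested, note any
 * offset change so relocations are rerun, and finally bind the vma.
 */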
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
				struct intel_ring_buffer *ring,
				bool *need_reloc)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	bool need_fence, need_mappable;
	u32 flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
		!vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
	int ret;

	need_fence =
		has_fenced_gpu_access &&
		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
		obj->tiling_mode != I915_TILING_NONE;
	need_mappable = need_fence || need_reloc_mappable(vma);

	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
				  false);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (has_fenced_gpu_access) {
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			ret = i915_gem_object_get_fence(obj);
			if (ret)
				return ret;

			if (i915_gem_object_pin_fence(obj))
				entry->flags |= __EXEC_OBJECT_HAS_FENCE;

			obj->pending_fenced_gpu_access = true;
		}
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	vma->bind_vma(vma, obj->cache_level, flags);

	return 0;
}

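/*
 * Reserve GTT space for all vmas in the request, processing objects that
 * must be mappable first and retrying after evicting the address space if
 * the first pass runs out of room.
 */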
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			    struct list_head *vmas,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct i915_address_space *vm;
	struct list_head ordered_vmas;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	if (list_empty(vmas))
		return 0;

	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

	INIT_LIST_HEAD(&ordered_vmas);
	while (!list_empty(vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(vmas, struct i915_vma, exec_list);
		obj = vma->obj;
		entry = vma->exec_entry;

		need_fence =
			has_fenced_gpu_access &&
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (need_mappable)
			list_move(&vma->exec_list, &ordered_vmas);
		else
			list_move_tail(&vma->exec_list, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
		obj->pending_fenced_gpu_access = false;
	}
	list_splice(&ordered_vmas, vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
			bool need_fence, need_mappable;

			obj = vma->obj;

			if (!drm_mm_node_allocated(&vma->node))
				continue;

			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;
			need_mappable = need_fence || need_reloc_mappable(vma);

			WARN_ON((need_mappable || need_fence) &&
			       !i915_is_ggtt(vma->vm));

			if ((entry->alignment &&
			     vma->node.start & (entry->alignment - 1)) ||
			    (need_mappable && !obj->map_and_fenceable))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, vmas, exec_list) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

err:
		if (ret != -ENOSPC || retry++)
			return ret;

		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);
}

static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_ring_buffer *ring,
				  struct eb_vmas *eb,
				  struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	unsigned count = args->buffer_count;

	if (WARN_ON(list_empty(&eb->vmas)))
		return 0;

	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->vmas)) {
		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (copy_to_user(&user_relocs[j].presumed_offset,
					 &invalid_offset,
					 sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		int offset = vma->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
							    reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
				struct list_head *vmas)
{
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_chipset)
		i915_gem_chipset_flush(ring->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(ring);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);

	for (i = 0; i < count; i++) {
		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
			return -EINVAL;

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915_prefault_disable)) {
			if (fault_in_multipages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}

static struct i915_hw_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
			  struct intel_ring_buffer *ring, const u32 ctx_id)
{
	struct i915_hw_context *ctx = NULL;
	struct i915_ctx_hang_stats *hs;

	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
		return ERR_PTR(-EINVAL);

	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
	if (IS_ERR(ctx))
		return ctx;

	hs = &ctx->hang_stats;
	if (hs->banned) {
		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
		return ERR_PTR(-EIO);
	}

	return ctx;
}

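/*
 * Commit the pending read/write domains and mark every vma as active on the
 * ring, recording the write seqno and flagging potential scanout updates.
 */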
static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
				   struct intel_ring_buffer *ring)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain == 0)
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

		i915_vma_move_to_active(vma, ring);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->last_write_seqno = intel_ring_get_seqno(ring);
			/* check for potential scanout */
			if (i915_gem_obj_ggtt_bound(obj) &&
			    i915_gem_obj_to_ggtt(obj)->pin_count)
				intel_mark_fb_busy(obj, ring);
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_ring_buffer *ring,
				    struct drm_i915_gem_object *obj)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)__i915_add_request(ring, file, obj, NULL);
}

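/*
 * On the gen7 render ring, zero the four GEN7_SO_WRITE_OFFSET registers
 * with MI_LOAD_REGISTER_IMM; used when userspace sets
 * I915_EXEC_GEN7_SOL_RESET.
 */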
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
		return 0;

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct eb_vmas *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_clip_rect *cliprects = NULL;
	struct intel_ring_buffer *ring;
	struct i915_hw_context *ctx;
	struct i915_address_space *vm;
	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 exec_start = args->batch_start_offset, exec_len;
	u32 mask, flags;
	int ret, mode, i;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(exec, args->buffer_count);
	if (ret)
		return ret;

	flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!file->is_master || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		flags |= I915_DISPATCH_PINNED;

	if ((args->flags & I915_EXEC_RING_MASK) > I915_NUM_RINGS) {
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
		ring = &dev_priv->ring[RCS];
	else
		ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];

	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	mask = I915_EXEC_CONSTANTS_MASK;
	switch (mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (ring == &dev_priv->ring[RCS] &&
		    mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4)
				return -EINVAL;

			if (INTEL_INFO(dev)->gen > 5 &&
			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
				return -EINVAL;

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}

		if (INTEL_INFO(dev)->gen >= 5) {
			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
			return -EINVAL;
		}

		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			return -EINVAL;
		}

		cliprects = kcalloc(args->num_cliprects,
				    sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		if (copy_from_user(cliprects,
				   to_user_ptr(args->cliprects_ptr),
				   sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto pre_mutex_err;
		}
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->ums.mm_suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		ret = PTR_ERR(ctx);
		goto pre_mutex_err;
	} 

	i915_gem_context_reference(ctx);

	vm = ctx->vm;
	if (!USES_FULL_PPGTT(dev))
		vm = &dev_priv->gtt.base;

	eb = eb_create(args);
	if (eb == NULL) {
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
								eb, exec);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (flags & I915_DISPATCH_SECURE &&
	    !batch_obj->has_global_gtt_mapping) {
		/* When we have multiple VMs, we'll need to make sure that we
		 * allocate space first */
		struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
		BUG_ON(!vma);
		vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
	}

	if (flags & I915_DISPATCH_SECURE)
		exec_start += i915_gem_obj_ggtt_offset(batch_obj);
	else
		exec_start += i915_gem_obj_offset(batch_obj, vm);

	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
	if (ret)
		goto err;

	ret = i915_switch_context(ring, file, ctx);
	if (ret)
		goto err;

	if (ring == &dev_priv->ring[RCS] &&
	    mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto err;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
1217
		intel_ring_emit(ring, mask << 16 | mode);
1218 1219 1220 1221 1222
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto err;
	}


	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(dev, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto err;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len,
							flags);
			if (ret)
				goto err;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len,
						flags);
		if (ret)
			goto err;
	}

	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

	i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

err:
	/* the request owns the ref now */
	i915_gem_context_unreference(ctx);
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	kfree(cliprects);

	/* intel_gpu_busy should also get a ref, so it will free when the device
	 * is really idle. */
	intel_runtime_pm_put(dev_priv);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec2_list);
	return ret;
}