/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>

#define  __EXEC_OBJECT_HAS_PIN (1<<31)
#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
#define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
#define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)

#define BATCH_OFFSET_BIAS (256*1024)

struct eb_vmas {
	struct list_head vmas;
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};

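/*
 * Allocate the execbuffer bookkeeping. When userspace passes
 * I915_EXEC_HANDLE_LUT the handles are dense indices and a flat lookup
 * table suffices; otherwise fall back to a hash table (at most half a
 * page of buckets) keyed on the object handle.
 */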
static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}

static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

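/*
 * Translate the user's array of object handles into vmas for the target
 * address space. A reference is taken on each object under the file's
 * table_lock; the vmas are then looked up (or created) outside that lock
 * and linked onto eb->vmas for the rest of the execbuffer to use.
 */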
static int
eb_lookup_vmas(struct eb_vmas *eb,
	       struct drm_i915_gem_exec_object2 *exec,
	       const struct drm_i915_gem_execbuffer2 *args,
	       struct i915_address_space *vm,
	       struct drm_file *file)
{
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret;

	INIT_LIST_HEAD(&objects);
	spin_lock(&file->table_lock);
	/* Grab a reference to the object and release the lock so we can look
	 * up or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				   exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				   obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		WARN_ONCE(obj->base.dumb,
			  "GPU use of dumb buffer is illegal.\n");

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&file->table_lock);

	i = 0;
	while (!list_empty(&objects)) {
		struct i915_vma *vma;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
		if (IS_ERR(vma)) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto err;
		}

		/* Transfer ownership from the objects list to the vmas list. */
		list_add_tail(&vma->exec_list, &eb->vmas);
		list_del_init(&obj->obj_exec_link);

		vma->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}

	return 0;


err:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		drm_gem_object_unreference(&obj->base);
	}
	/*
	 * Objects already transferred to the vmas list will be unreferenced by
	 * eb_destroy.
	 */

	return ret;
}

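/*
 * Look up the vma for an execbuffer handle: a direct index in LUT mode,
 * otherwise a walk of the matching hash bucket.
 */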
static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct hlist_node *node;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each(node, head) {
			struct i915_vma *vma;

			vma = hlist_entry(node, struct i915_vma, exec_node);
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

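/*
 * Drop the pin (and any fence pin) taken when the vma was reserved for
 * this execbuffer, and clear the corresponding bookkeeping flags.
 */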
static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry;
	struct drm_i915_gem_object *obj = vma->obj;

	if (!drm_mm_node_allocated(&vma->node))
		return;

	entry = vma->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		vma->pin_count--;

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void eb_destroy(struct eb_vmas *eb)
{
	while (!list_empty(&eb->vmas)) {
		struct i915_vma *vma;

		vma = list_first_entry(&eb->vmas,
				       struct i915_vma,
				       exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}
	kfree(eb);
}

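/*
 * Decide whether a relocation may be written through the CPU domain
 * rather than the GTT: always fine with an LLC, and preferred when the
 * object is already CPU-writable, not GTT-mappable, or snooped.
 */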
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (HAS_LLC(obj->base.dev) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		!obj->map_and_fenceable ||
		obj->cache_level != I915_CACHE_NONE);
}

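/*
 * Write a relocation through a CPU kmap of the backing page. On gen8+
 * the target address is 64 bits wide, so the upper half may spill onto
 * the following page and require a second kmap.
 */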
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	uint64_t delta = reloc->delta + target_offset;
	char *vaddr;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
				reloc->offset >> PAGE_SHIFT));
	*(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);

	if (INTEL_INFO(dev)->gen >= 8) {
		page_offset = offset_in_page(page_offset + sizeof(uint32_t));

		if (page_offset == 0) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic(i915_gem_object_get_page(obj,
			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
		}

		*(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
	}

	kunmap_atomic(vaddr);

	return 0;
}

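/*
 * Write a relocation through an atomic WC mapping of the global GTT,
 * after moving the object to the GTT domain and dropping its fence. As
 * with the CPU path, the upper 32 bits on gen8+ may cross a page
 * boundary and need a remap.
 */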
static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint64_t delta = reloc->delta + target_offset;
	uint64_t offset;
	void __iomem *reloc_page;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	/* Map the page containing the relocation we're going to perform.  */
	offset = i915_gem_obj_ggtt_offset(obj);
	offset += reloc->offset;
	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
					      offset & PAGE_MASK);
	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));

	if (INTEL_INFO(dev)->gen >= 8) {
		offset += sizeof(uint32_t);

		if (offset_in_page(offset) == 0) {
			io_mapping_unmap_atomic(reloc_page);
			reloc_page =
				io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
							 offset);
		}

		iowrite32(upper_32_bits(delta),
			  reloc_page + offset_in_page(offset));
	}

	io_mapping_unmap_atomic(reloc_page);

	return 0;
}

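/*
 * Apply a single relocation: validate the target and the requested
 * domains, skip the write entirely if the presumed offset is already
 * correct, and otherwise patch the buffer via the CPU or GTT path.
 */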
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_vmas *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint64_t target_offset;
	int ret;

	/* we already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = target_vma->node.start;

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !(target_vma->bound & GLOBAL_BIND))) {
		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
				    GLOBAL_BIND);
		if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
			return ret;
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return -EINVAL;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && in_atomic())
		return -EFAULT;

	if (use_cpu_reloc(obj))
		ret = relocate_entry_cpu(obj, reloc, target_offset);
	else
		ret = relocate_entry_gtt(obj, reloc, target_offset);

	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

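/*
 * Apply all relocations for one vma. The user's relocation entries are
 * copied onto the stack in small batches with the non-faulting copy
 * helpers (pagefaults are disabled by the caller), and any presumed
 * offset that changed is written back to userspace the same way.
 */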
static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
				 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int remain, ret;

	user_relocs = to_user_ptr(entry->relocs_ptr);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
				      struct eb_vmas *eb,
				      struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
	struct i915_vma *vma;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. For in such a case, the page
	 * fault handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(vma, &eb->vmas, exec_list) {
		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

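/*
 * Pin one vma into its address space for execution, honouring the
 * mappable/GTT/bias placement hints, grabbing a fence register when
 * requested, and flagging whether the final offset forces a relocation
 * pass.
 */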
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
				struct intel_engine_cs *ring,
				bool *need_reloc)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	uint64_t flags;
	int ret;

	flags = 0;
	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
		flags |= PIN_GLOBAL | PIN_MAPPABLE;
	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
		flags |= PIN_GLOBAL;
	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
		flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;

	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
		ret = i915_gem_object_get_fence(obj);
		if (ret)
			return ret;

		if (i915_gem_object_pin_fence(obj))
			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	return 0;
}

static bool
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	if (entry->relocation_count == 0)
		return false;

	if (!i915_is_ggtt(vma->vm))
		return false;

	/* See also use_cpu_reloc() */
	if (HAS_LLC(vma->obj->base.dev))
		return false;

	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	return true;
}

static bool
eb_vma_misplaced(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	struct drm_i915_gem_object *obj = vma->obj;

	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
	       !i915_is_ggtt(vma->vm));

	if (entry->alignment &&
	    vma->node.start & (entry->alignment - 1))
		return true;

	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
		return true;

	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	return false;
}

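/*
 * Bind every vma on the execbuffer list, processing the most constrained
 * (mappable) objects first. On -ENOSPC all pins are dropped, the entire
 * VM is evicted and the whole set is retried once before giving up.
 */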
static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
			    struct list_head *vmas,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct i915_address_space *vm;
	struct list_head ordered_vmas;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	i915_gem_retire_requests_ring(ring);

	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

	INIT_LIST_HEAD(&ordered_vmas);
	while (!list_empty(vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(vmas, struct i915_vma, exec_list);
		obj = vma->obj;
		entry = vma->exec_entry;

		if (!has_fenced_gpu_access)
			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
		need_fence =
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (need_mappable) {
			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
			list_move(&vma->exec_list, &ordered_vmas);
		} else
			list_move_tail(&vma->exec_list, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
	}
	list_splice(&ordered_vmas, vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (eb_vma_misplaced(vma))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, vmas, exec_list) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

err:
		if (ret != -ENOSPC || retry++)
			return ret;

		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);
}

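/*
 * Relocation slow path: drop the mutex, copy all relocation entries into
 * a kernel buffer with pagefaults allowed, then retake the lock, look up
 * and reserve the objects again, and apply the relocations from the
 * kernel copy.
 */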
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_engine_cs *ring,
				  struct eb_vmas *eb,
				  struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	unsigned count = args->buffer_count;

	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->vmas)) {
		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (__copy_to_user(&user_relocs[j].presumed_offset,
					   &invalid_offset,
					   sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		int offset = vma->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
							    reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

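/*
 * Flush outstanding CPU writes, synchronise every object with the target
 * ring, and invalidate the GPU caches so the batch sees coherent data.
 */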
static int
i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
				struct list_head *vmas)
{
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_chipset)
		i915_gem_chipset_flush(ring->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(ring);
}

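/*
 * Quick argument check: no unknown flags, and the batch start offset and
 * length must both be 8-byte aligned.
 */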
static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

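/*
 * Sanity-check the exec list before taking any locks: reject unknown
 * per-object flags, guard the total relocation count against overflow,
 * and verify the relocation arrays are writable userspace memory.
 */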
static int
validate_exec_list(struct drm_device *dev,
		   struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
	unsigned invalid_flags;
	int i;

	invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	if (USES_FULL_PPGTT(dev))
		invalid_flags |= EXEC_OBJECT_NEEDS_GTT;

	for (i = 0; i < count; i++) {
		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & invalid_flags)
			return -EINVAL;

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915.prefault_disable)) {
			if (fault_in_multipages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}

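/*
 * Resolve and vet the context for this submission: non-default contexts
 * are only valid on the render ring, banned contexts are rejected, and
 * with execlists the logical ring context is created on first use.
 */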
static struct intel_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
			  struct intel_engine_cs *ring, const u32 ctx_id)
{
	struct intel_context *ctx = NULL;
	struct i915_ctx_hang_stats *hs;

	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
		return ERR_PTR(-EINVAL);

	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
	if (IS_ERR(ctx))
		return ctx;

	hs = &ctx->hang_stats;
	if (hs->banned) {
		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
		return ERR_PTR(-EIO);
	}

	if (i915.enable_execlists && !ctx->engine[ring->id].state) {
		int ret = intel_lr_context_deferred_create(ctx, ring);
		if (ret) {
			DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
			return ERR_PTR(ret);
		}
	}

	return ctx;
}

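/*
 * After a successful dispatch, mark each object as active on the ring:
 * resolve its pending read/write domains, attach the current request for
 * retirement and fence tracking, and invalidate any frontbuffer state.
 */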
void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
				   struct intel_engine_cs *ring)
{
	struct drm_i915_gem_request *req = intel_ring_get_request(ring);
	struct i915_vma *vma;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
		struct drm_i915_gem_object *obj = vma->obj;
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain == 0)
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;

		i915_vma_move_to_active(vma, ring);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			i915_gem_request_assign(&obj->last_write_req, req);

			intel_fb_obj_invalidate(obj, ring);

			/* update for the implicit flush after a batch */
			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
		}
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			i915_gem_request_assign(&obj->last_fenced_req, req);
			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
				struct drm_i915_private *dev_priv = to_i915(ring->dev);
				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
					       &dev_priv->mm.fence_list);
			}
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_engine_cs *ring,
				    struct drm_i915_gem_object *obj)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)__i915_add_request(ring, file, obj);
}

static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

static int
i915_emit_box(struct intel_engine_cs *ring,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(ring->dev)->gen >= 4) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO_I965);
		intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
		intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
		intel_ring_emit(ring, DR4);
	} else {
		ret = intel_ring_begin(ring, 6);
		if (ret)
			return ret;

		intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO);
		intel_ring_emit(ring, DR1);
		intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
		intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
		intel_ring_emit(ring, DR4);
		intel_ring_emit(ring, 0);
	}
	intel_ring_advance(ring);

	return 0;
}


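/*
 * Legacy ringbuffer submission backend: validate cliprects and the
 * constants mode, flush the objects to the GPU, switch contexts, emit
 * any required state changes, then dispatch the batch (once per cliprect
 * on pre-gen5) and retire it.
 */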
int
i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
			       struct intel_engine_cs *ring,
			       struct intel_context *ctx,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas,
			       struct drm_i915_gem_object *batch_obj,
			       u64 exec_start, u32 flags)
{
	struct drm_clip_rect *cliprects = NULL;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 exec_len;
	int instp_mode;
	u32 instp_mask;
	int i, ret = 0;

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}

		if (INTEL_INFO(dev)->gen >= 5) {
			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
			return -EINVAL;
		}

		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			return -EINVAL;
		}

		cliprects = kcalloc(args->num_cliprects,
				    sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto error;
		}

		if (copy_from_user(cliprects,
				   to_user_ptr(args->cliprects_ptr),
				   sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto error;
		}
	} else {
		if (args->DR4 == 0xffffffff) {
			DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
			args->DR4 = 0;
		}

		if (args->DR1 || args->DR4 || args->cliprects_ptr) {
			DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
			return -EINVAL;
		}
	}

	ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
	if (ret)
		goto error;

	ret = i915_switch_context(ring, ctx);
	if (ret)
		goto error;

	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	instp_mask = I915_EXEC_CONSTANTS_MASK;
	switch (instp_mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			ret = -EINVAL;
			goto error;
		}

		if (instp_mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4) {
				DRM_DEBUG("no rel constants on pre-gen4\n");
				ret = -EINVAL;
				goto error;
			}

			if (INTEL_INFO(dev)->gen > 5 &&
			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
				ret = -EINVAL;
				goto error;
			}

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
		ret = -EINVAL;
		goto error;
	}

	if (ring == &dev_priv->ring[RCS] &&
			instp_mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto error;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = instp_mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto error;
	}

	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(ring, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto error;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len,
							flags);
			if (ret)
				goto error;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len,
						flags);
		if (ret)
			return ret;
	}

	trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), flags);

	i915_gem_execbuffer_move_to_active(vmas, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

error:
	kfree(cliprects);
	return ret;
}

/**
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The Ring ID is returned.
 */
static int gen8_dispatch_bsd_ring(struct drm_device *dev,
				  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv is using one ring */
	if (file_priv->bsd_ring)
		return file_priv->bsd_ring->id;
	else {
		/* If no, use the ping-pong mechanism to select one ring */
		int ring_id;

		mutex_lock(&dev->struct_mutex);
		if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
			ring_id = VCS;
			dev_priv->mm.bsd_ring_dispatch_index = 1;
		} else {
			ring_id = VCS2;
			dev_priv->mm.bsd_ring_dispatch_index = 0;
		}
		file_priv->bsd_ring = &dev_priv->ring[ring_id];
		mutex_unlock(&dev->struct_mutex);
		return ring_id;
	}
}

static struct drm_i915_gem_object *
eb_get_batch(struct eb_vmas *eb)
{
	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return vma->obj;
}

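/*
 * Common execbuffer path: validate the arguments, pick the target ring
 * and context, look up and reserve all objects, apply relocations
 * (falling back to the slow path on -EFAULT), optionally run the command
 * parser, and hand the batch to the backend for submission.
 */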
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct eb_vmas *eb;
	struct drm_i915_gem_object *batch_obj;
	struct intel_engine_cs *ring;
	struct intel_context *ctx;
	struct i915_address_space *vm;
	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u64 exec_start = args->batch_start_offset;
	u32 flags;
	int ret;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(dev, exec, args->buffer_count);
	if (ret)
		return ret;

	flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!file->is_master || !capable(CAP_SYS_ADMIN))
		    return -EPERM;

		flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		flags |= I915_DISPATCH_PINNED;

	if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
		ring = &dev_priv->ring[RCS];
	else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
		if (HAS_BSD2(dev)) {
			int ring_id;
			ring_id = gen8_dispatch_bsd_ring(dev, file);
			ring = &dev_priv->ring[ring_id];
		} else
			ring = &dev_priv->ring[VCS];
	} else
		ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];

	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		ret = PTR_ERR(ctx);
		goto pre_mutex_err;
	}

	i915_gem_context_reference(ctx);

	if (ctx->ppgtt)
		vm = &ctx->ppgtt->base;
	else
		vm = &dev_priv->gtt.base;

	eb = eb_create(args);
	if (eb == NULL) {
		i915_gem_context_unreference(ctx);
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = eb_get_batch(eb);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
								eb, exec);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	if (i915_needs_cmd_parser(ring)) {
		ret = i915_parse_cmds(ring,
				      batch_obj,
				      args->batch_start_offset,
				      file->is_master);
		if (ret) {
			if (ret != -EACCES)
				goto err;
		} else {
			/*
			 * XXX: Actually do this when enabling batch copy...
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
			 * from MI_BATCH_BUFFER_START commands issued in the
			 * dispatch_execbuffer implementations. We specifically don't
			 * want that set when the command parser is enabled.
			 */
		}
	}

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (flags & I915_DISPATCH_SECURE) {
		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
		if (ret)
			goto err;

		exec_start += i915_gem_obj_ggtt_offset(batch_obj);
	} else
		exec_start += i915_gem_obj_offset(batch_obj, vm);

	ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
				      &eb->vmas, batch_obj, exec_start, flags);

	/*
	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
	 * batch vma for correctness. For less ugliness and fragility, this
	 * needs to be adjusted to also track the ggtt batch vma properly as
	 * active.
	 */
	if (flags & I915_DISPATCH_SECURE)
		i915_gem_object_ggtt_unpin(batch_obj);
err:
	/* the request owns the ref now */
	i915_gem_context_unreference(ctx);
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	/* intel_gpu_busy should also get a ref, so it will free when the device
	 * is really idle. */
	intel_runtime_pm_put(dev_priv);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user (%d)\n",
					  args->buffer_count, ret);
				break;
			}
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->rsvd2 != 0) {
		DRM_DEBUG("dirty rsvd2 field\n");
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
				   to_user_ptr(args->buffers_ptr);
		int i;

		for (i = 0; i < args->buffer_count; i++) {
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user\n",
					  args->buffer_count);
				break;
			}
		}
	}

	drm_free_large(exec2_list);
	return ret;
}