/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>

#define  __EXEC_OBJECT_HAS_PIN (1<<31)
#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
#define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
#define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)

#define BATCH_OFFSET_BIAS (256*1024)

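/*
 * Per-execbuf bookkeeping: vmas lists every vma taking part in this
 * execbuf, and the union provides the handle->vma lookup, either as a
 * direct array (and < 0) or as hash buckets (and = bucket mask).
 */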
struct eb_vmas {
	struct list_head vmas;
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};

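/*
 * Allocate the lookup structure for this execbuf. With
 * I915_EXEC_HANDLE_LUT userspace passes buffer indices rather than
 * handles, so a flat array suffices; otherwise fall back to hash
 * buckets sized against the buffer count.
 */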
static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}

static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

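/*
 * Resolve each userspace handle in the exec list to a vma in the target
 * address space, taking a reference on every object. Invalid and
 * duplicate handles are rejected here, before anything is bound.
 */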
static int
eb_lookup_vmas(struct eb_vmas *eb,
	       struct drm_i915_gem_exec_object2 *exec,
	       const struct drm_i915_gem_execbuffer2 *args,
	       struct i915_address_space *vm,
	       struct drm_file *file)
{
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret;

	INIT_LIST_HEAD(&objects);
	spin_lock(&file->table_lock);
	/* Grab a reference to the object and release the lock so we can
	 * look up or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				   exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				   obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&file->table_lock);

	i = 0;
	while (!list_empty(&objects)) {
		struct i915_vma *vma;

		if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
		    USES_FULL_PPGTT(vm->dev)) {
			ret = -EINVAL;
			goto err;
		}

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
		if (IS_ERR(vma)) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto err;
		}

		/* Transfer ownership from the objects list to the vmas list. */
		list_add_tail(&vma->exec_list, &eb->vmas);
		list_del_init(&obj->obj_exec_link);

		vma->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}

	return 0;


err:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		drm_gem_object_unreference(&obj->base);
	}
	/*
	 * Objects already transferred to the vmas list will be unreferenced by
	 * eb_destroy.
	 */

	return ret;
}

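/* Map an execbuf handle back to its vma via the lut or the hash buckets. */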
static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct hlist_node *node;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each(node, head) {
			struct i915_vma *vma;

			vma = hlist_entry(node, struct i915_vma, exec_node);
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry;
	struct drm_i915_gem_object *obj = vma->obj;

	if (!drm_mm_node_allocated(&vma->node))
		return;

	entry = vma->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		vma->pin_count--;

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void eb_destroy(struct eb_vmas *eb)
{
	while (!list_empty(&eb->vmas)) {
		struct i915_vma *vma;

		vma = list_first_entry(&eb->vmas,
				       struct i915_vma,
				       exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}
	kfree(eb);
}

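/*
 * Decide between the CPU and GTT relocation paths: the CPU path is used
 * on LLC platforms, for objects already in the CPU write domain, for
 * objects outside the mappable aperture, and for cacheable objects.
 */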
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (HAS_LLC(obj->base.dev) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		!obj->map_and_fenceable ||
		obj->cache_level != I915_CACHE_NONE);
}

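/*
 * Perform a relocation through a CPU (kmap) mapping. On gen8+ the
 * presumed offset is 64 bits wide, so the upper dword is written too,
 * remapping if the entry straddles a page boundary.
 */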
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	uint64_t delta = reloc->delta + target_offset;
	char *vaddr;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
				reloc->offset >> PAGE_SHIFT));
	*(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);

	if (INTEL_INFO(dev)->gen >= 8) {
		page_offset = offset_in_page(page_offset + sizeof(uint32_t));

		if (page_offset == 0) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic(i915_gem_object_get_page(obj,
			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
		}

		*(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
	}

	kunmap_atomic(vaddr);

	return 0;
}

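/*
 * Perform a relocation through an atomic WC mapping of the GTT. As with
 * the CPU path, gen8+ also writes the upper 32 bits, moving the
 * io-mapping along if the entry crosses into the next page.
 */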
static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint64_t delta = reloc->delta + target_offset;
	uint32_t __iomem *reloc_entry;
	void __iomem *reloc_page;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	/* Map the page containing the relocation we're going to perform.  */
	reloc->offset += i915_gem_obj_ggtt_offset(obj);
	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
			reloc->offset & PAGE_MASK);
	reloc_entry = (uint32_t __iomem *)
		(reloc_page + offset_in_page(reloc->offset));
	iowrite32(lower_32_bits(delta), reloc_entry);

	if (INTEL_INFO(dev)->gen >= 8) {
		reloc_entry += 1;

		if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
			io_mapping_unmap_atomic(reloc_page);
			reloc_page = io_mapping_map_atomic_wc(
					dev_priv->gtt.mappable,
					reloc->offset + sizeof(uint32_t));
			reloc_entry = reloc_page;
		}

		iowrite32(upper_32_bits(delta), reloc_entry);
	}

	io_mapping_unmap_atomic(reloc_page);

	return 0;
}

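/*
 * Apply a single relocation: look up the target vma, validate the
 * requested GPU domains and the relocation offset, then write the new
 * presumed address into the object unless it already matches.
 */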
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_vmas *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint64_t target_offset;
	int ret;

	/* we already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = target_vma->node.start;

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non-secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !target_i915_obj->has_global_gtt_mapping)) {
		struct i915_vma *vma =
			list_first_entry(&target_i915_obj->vma_list,
					 typeof(*vma), vma_link);
		vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return -EINVAL;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && in_atomic())
		return -EFAULT;

	if (use_cpu_reloc(obj))
		ret = relocate_entry_cpu(obj, reloc, target_offset);
	else
		ret = relocate_entry_gtt(obj, reloc, target_offset);

	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

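/*
 * Fast relocation path: user relocation entries are copied in chunks
 * onto the stack with the inatomic variants (the caller has pagefaults
 * disabled), applied, and any changed presumed_offset written back.
 */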
static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
				 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int remain, ret;

	user_relocs = to_user_ptr(entry->relocs_ptr);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}

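/* Slow-path counterpart: the relocations were already copied in by the caller. */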
static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
				      struct eb_vmas *eb,
				      struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

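/* Apply relocations for every vma in the execbuf under pagefault_disable(). */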
static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
	struct i915_vma *vma;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. For in such a case, the page fault
	 * handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(vma, &eb->vmas, exec_list) {
		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

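/*
 * Pin one vma for the execbuf, honouring the mappable/global/bias
 * constraints recorded in its exec entry, and grab a fence register if
 * the entry requires one.
 */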
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
				struct intel_engine_cs *ring,
				bool *need_reloc)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	uint64_t flags;
	int ret;

	flags = 0;
	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
		flags |= PIN_MAPPABLE;
	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
		flags |= PIN_GLOBAL;
	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
		flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;

	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
		ret = i915_gem_object_get_fence(obj);
		if (ret)
			return ret;

		if (i915_gem_object_pin_fence(obj))
			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	return 0;
}

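/*
 * GTT-path relocations need the object inside the mappable aperture;
 * objects that will take the CPU path (see use_cpu_reloc()) do not.
 */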
static bool
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	if (entry->relocation_count == 0)
		return false;

	if (!i915_is_ggtt(vma->vm))
		return false;

	/* See also use_cpu_reloc() */
	if (HAS_LLC(vma->obj->base.dev))
		return false;

	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	return true;
}

static bool
eb_vma_misplaced(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	struct drm_i915_gem_object *obj = vma->obj;

	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
	       !i915_is_ggtt(vma->vm));

	if (entry->alignment &&
	    vma->node.start & (entry->alignment - 1))
		return true;

	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
		return true;

	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	return false;
}

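/*
 * Reserve GTT space for every vma in the execbuf, placing objects that
 * need a mappable binding first. On -ENOSPC everything is unpinned, the
 * address space is evicted and the whole sequence retried once.
 */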
static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
			    struct list_head *vmas,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct i915_address_space *vm;
	struct list_head ordered_vmas;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	if (list_empty(vmas))
		return 0;

	i915_gem_retire_requests_ring(ring);

	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

	INIT_LIST_HEAD(&ordered_vmas);
	while (!list_empty(vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(vmas, struct i915_vma, exec_list);
		obj = vma->obj;
		entry = vma->exec_entry;

		if (!has_fenced_gpu_access)
			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
		need_fence =
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (need_mappable) {
			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
			list_move(&vma->exec_list, &ordered_vmas);
		} else
			list_move_tail(&vma->exec_list, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
	}
	list_splice(&ordered_vmas, vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (eb_vma_misplaced(vma))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, vmas, exec_list) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

err:
		if (ret != -ENOSPC || retry++)
			return ret;

		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);
}

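/*
 * Relocation slow path: drop struct_mutex, copy all relocations from
 * userspace with pagefaults enabled, then retake the lock, re-reserve
 * the objects and apply the relocations from the kernel copy.
 */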
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_engine_cs *ring,
				  struct eb_vmas *eb,
				  struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	unsigned count = args->buffer_count;

	if (WARN_ON(list_empty(&eb->vmas)))
		return 0;

	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->vmas)) {
		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (__copy_to_user(&user_relocs[j].presumed_offset,
					   &invalid_offset,
					   sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		int offset = vma->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
							    reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

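/*
 * Flush pending CPU writes and synchronise every object with the target
 * ring so the batch observes coherent data when it executes.
 */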
static int
i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
				struct list_head *vmas)
{
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_chipset)
		i915_gem_chipset_flush(ring->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(ring);
}

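/*
 * Argument sanity checks performed before any object is looked up:
 * batch alignment, unknown flags, relocation-count overflow and write
 * access to the user relocation arrays.
 */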
static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);

	for (i = 0; i < count; i++) {
		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
			return -EINVAL;

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915.prefault_disable)) {
			if (fault_in_multipages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}

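/*
 * Look up the context for this execbuf and refuse submission if the
 * context has been banned for causing too many hangs.
 */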
static struct intel_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
			  struct intel_engine_cs *ring, const u32 ctx_id)
{
	struct intel_context *ctx = NULL;
	struct i915_ctx_hang_stats *hs;

	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
		return ERR_PTR(-EINVAL);

	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
	if (IS_ERR(ctx))
		return ctx;

	hs = &ctx->hang_stats;
	if (hs->banned) {
		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
		return ERR_PTR(-EIO);
	}

	return ctx;
}

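/*
 * Commit domain transitions and retirement tracking for every object
 * after the batch is queued: mark writers dirty, record write/fence
 * seqnos and update the fence register LRU.
 */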
static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
				   struct intel_engine_cs *ring)
{
	u32 seqno = intel_ring_get_seqno(ring);
	struct i915_vma *vma;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
		struct drm_i915_gem_object *obj = vma->obj;
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain == 0)
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;

		i915_vma_move_to_active(vma, ring);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->last_write_seqno = seqno;

			intel_fb_obj_invalidate(obj, ring);

			/* update for the implicit flush after a batch */
			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
		}
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			obj->last_fenced_seqno = seqno;
			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
				struct drm_i915_private *dev_priv = to_i915(ring->dev);
				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
					       &dev_priv->mm.fence_list);
			}
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_engine_cs *ring,
				    struct drm_i915_gem_object *obj)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)__i915_add_request(ring, file, obj, NULL);
}

static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

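/*
 * Submit the batch on a legacy ringbuffer: validate cliprects and the
 * constants mode, flush the objects to the GPU, switch contexts and
 * dispatch the batch (once per cliprect when cliprects are supplied).
 */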
static int
legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
			     struct intel_engine_cs *ring,
			     struct intel_context *ctx,
			     struct drm_i915_gem_execbuffer2 *args,
			     struct list_head *vmas,
			     struct drm_i915_gem_object *batch_obj,
			     u64 exec_start, u32 flags)
{
	struct drm_clip_rect *cliprects = NULL;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 exec_len;
	int instp_mode;
	u32 instp_mask;
	int i, ret = 0;

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}

		if (INTEL_INFO(dev)->gen >= 5) {
			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
			return -EINVAL;
		}

		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			return -EINVAL;
		}

		cliprects = kcalloc(args->num_cliprects,
				    sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto error;
		}

		if (copy_from_user(cliprects,
				   to_user_ptr(args->cliprects_ptr),
				   sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto error;
		}
	} else {
		if (args->DR4 == 0xffffffff) {
			DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
			args->DR4 = 0;
		}

		if (args->DR1 || args->DR4 || args->cliprects_ptr) {
			DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
			return -EINVAL;
		}
	}

	ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
	if (ret)
		goto error;

	ret = i915_switch_context(ring, ctx);
	if (ret)
		goto error;

	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	instp_mask = I915_EXEC_CONSTANTS_MASK;
	switch (instp_mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			ret = -EINVAL;
			goto error;
		}

		if (instp_mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4) {
				DRM_DEBUG("no rel constants on pre-gen4\n");
				ret = -EINVAL;
				goto error;
			}

			if (INTEL_INFO(dev)->gen > 5 &&
			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
				ret = -EINVAL;
				goto error;
			}

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
		ret = -EINVAL;
		goto error;
	}

	if (ring == &dev_priv->ring[RCS] &&
			instp_mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto error;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = instp_mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto error;
	}

	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(dev, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto error;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len,
							flags);
			if (ret)
				goto error;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len,
						flags);
		if (ret)
			return ret;
	}

	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

	i915_gem_execbuffer_move_to_active(vmas, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

error:
	kfree(cliprects);
	return ret;
}

/**
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The Ring ID is returned.
 */
static int gen8_dispatch_bsd_ring(struct drm_device *dev,
				  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv is using one ring */
	if (file_priv->bsd_ring)
		return file_priv->bsd_ring->id;
	else {
		/* If no, use the ping-pong mechanism to select one ring */
		int ring_id;

		mutex_lock(&dev->struct_mutex);
		if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
			ring_id = VCS;
			dev_priv->mm.bsd_ring_dispatch_index = 1;
		} else {
			ring_id = VCS2;
			dev_priv->mm.bsd_ring_dispatch_index = 0;
		}
		file_priv->bsd_ring = &dev_priv->ring[ring_id];
		mutex_unlock(&dev->struct_mutex);
		return ring_id;
	}
}

static struct drm_i915_gem_object *
eb_get_batch(struct eb_vmas *eb)
{
	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return vma->obj;
}
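
/*
 * Common execbuffer path: validate the arguments, select the ring,
 * look up and reserve all objects, apply relocations (falling back to
 * the slow path on -EFAULT) and finally submit the batch.
 */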

static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct eb_vmas *eb;
	struct drm_i915_gem_object *batch_obj;
	struct intel_engine_cs *ring;
	struct intel_context *ctx;
	struct i915_address_space *vm;
	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u64 exec_start = args->batch_start_offset;
	u32 flags;
	int ret;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(exec, args->buffer_count);
	if (ret)
		return ret;

	flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!file->is_master || !capable(CAP_SYS_ADMIN))
		    return -EPERM;

		flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		flags |= I915_DISPATCH_PINNED;

	if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
		ring = &dev_priv->ring[RCS];
	else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
		if (HAS_BSD2(dev)) {
			int ring_id;
			ring_id = gen8_dispatch_bsd_ring(dev, file);
			ring = &dev_priv->ring[ring_id];
		} else
			ring = &dev_priv->ring[VCS];
	} else
		ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];

	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->ums.mm_suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		ret = PTR_ERR(ctx);
		goto pre_mutex_err;
	}

	i915_gem_context_reference(ctx);

	vm = ctx->vm;
	if (!USES_FULL_PPGTT(dev))
		vm = &dev_priv->gtt.base;

	eb = eb_create(args);
	if (eb == NULL) {
		i915_gem_context_unreference(ctx);
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = eb_get_batch(eb);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
								eb, exec);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	if (i915_needs_cmd_parser(ring)) {
		ret = i915_parse_cmds(ring,
				      batch_obj,
				      args->batch_start_offset,
				      file->is_master);
		if (ret)
			goto err;

		/*
		 * XXX: Actually do this when enabling batch copy...
		 *
		 * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
		 * from MI_BATCH_BUFFER_START commands issued in the
		 * dispatch_execbuffer implementations. We specifically don't
		 * want that set when the command parser is enabled.
		 */
	}

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (flags & I915_DISPATCH_SECURE) {
		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
		if (ret)
			goto err;

		exec_start += i915_gem_obj_ggtt_offset(batch_obj);
	} else
		exec_start += i915_gem_obj_offset(batch_obj, vm);

	ret = legacy_ringbuffer_submission(dev, file, ring, ctx,
					   args, &eb->vmas, batch_obj, exec_start, flags);

	/*
	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
	 * batch vma for correctness. To be less ugly and less fragile, this
	 * needs to be adjusted to also track the ggtt batch vma properly as
	 * active.
	 */
	if (flags & I915_DISPATCH_SECURE)
		i915_gem_object_ggtt_unpin(batch_obj);
err:
	/* the request owns the ref now */
	i915_gem_context_unreference(ctx);
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	/* intel_gpu_busy should also get a ref, so it will free when the device
	 * is really idle. */
	intel_runtime_pm_put(dev_priv);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user (%d)\n",
					  args->buffer_count, ret);
				break;
			}
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->rsvd2 != 0) {
		DRM_DEBUG("dirty rsvd2 field\n");
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
				   to_user_ptr(args->buffers_ptr);
		int i;

		for (i = 0; i < args->buffer_count; i++) {
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user\n",
					  args->buffer_count);
				break;
			}
		}
	}

	drm_free_large(exec2_list);
	return ret;
}