/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>
#include <linux/uaccess.h>

#define  __EXEC_OBJECT_HAS_PIN (1<<31)
#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
#define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
#define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)

#define BATCH_OFFSET_BIAS (256*1024)

struct eb_vmas {
	struct list_head vmas;
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};

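/*
 * eb_create() sizes the handle lookup structure for this execbuf: with
 * I915_EXEC_HANDLE_LUT userspace promises dense, index-based handles so a
 * flat array is enough; otherwise fall back to a hash table sized against
 * the buffer count.
 */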
static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}

static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

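/*
 * Resolve each userspace handle to a GEM object under file->table_lock,
 * take a reference, then look up (or create) the matching vma in the
 * target address space once the lock has been dropped.
 */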
static int
eb_lookup_vmas(struct eb_vmas *eb,
	       struct drm_i915_gem_exec_object2 *exec,
	       const struct drm_i915_gem_execbuffer2 *args,
	       struct i915_address_space *vm,
	       struct drm_file *file)
{
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret;

	INIT_LIST_HEAD(&objects);
	spin_lock(&file->table_lock);
	/* Grab a reference to the object and release the lock so we can lookup
	 * or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				   exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				   obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&file->table_lock);

	i = 0;
	while (!list_empty(&objects)) {
		struct i915_vma *vma;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
		if (IS_ERR(vma)) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto err;
		}

		/* Transfer ownership from the objects list to the vmas list. */
		list_add_tail(&vma->exec_list, &eb->vmas);
		list_del_init(&obj->obj_exec_link);

		vma->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}

	return 0;


err:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		drm_gem_object_unreference(&obj->base);
	}
	/*
	 * Objects already transferred to the vmas list will be unreferenced by
	 * eb_destroy.
	 */

	return ret;
}

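/* Find a previously looked-up vma by its execbuf handle (direct LUT or hash bucket). */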
static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct i915_vma *vma;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each_entry(vma, head, exec_node) {
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry;
	struct drm_i915_gem_object *obj = vma->obj;

	if (!drm_mm_node_allocated(&vma->node))
		return;

	entry = vma->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		vma->pin_count--;

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void eb_destroy(struct eb_vmas *eb)
{
	while (!list_empty(&eb->vmas)) {
		struct i915_vma *vma;

		vma = list_first_entry(&eb->vmas,
				       struct i915_vma,
				       exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}
	kfree(eb);
}

static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (HAS_LLC(obj->base.dev) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		obj->cache_level != I915_CACHE_NONE);
}

/* Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
#define GEN8_HIGH_ADDRESS_BIT 47
static inline uint64_t gen8_canonical_addr(uint64_t address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline uint64_t gen8_noncanonical_addr(uint64_t address)
{
	return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
}

static inline uint64_t
relocation_target(struct drm_i915_gem_relocation_entry *reloc,
		  uint64_t target_offset)
{
	return gen8_canonical_addr((int)reloc->delta + target_offset);
}

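/*
 * Three ways to write a relocation into the batch: through a CPU kmap,
 * through the GTT aperture, or with an explicit clflush when neither
 * applies.  On gen8+ the target address is 64 bits wide and the upper
 * half may land on the following page, hence the second write.
 */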
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	uint64_t delta = relocation_target(reloc, target_offset);
	char *vaddr;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
				reloc->offset >> PAGE_SHIFT));
	*(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);

	if (INTEL_INFO(dev)->gen >= 8) {
		page_offset = offset_in_page(page_offset + sizeof(uint32_t));

		if (page_offset == 0) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
		}

		*(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
	}

	kunmap_atomic(vaddr);

	return 0;
}

static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint64_t delta = relocation_target(reloc, target_offset);
	uint64_t offset;
	void __iomem *reloc_page;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	/* Map the page containing the relocation we're going to perform.  */
	offset = i915_gem_obj_ggtt_offset(obj);
	offset += reloc->offset;
	reloc_page = io_mapping_map_atomic_wc(ggtt->mappable,
					      offset & PAGE_MASK);
	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));

	if (INTEL_INFO(dev)->gen >= 8) {
		offset += sizeof(uint32_t);

		if (offset_in_page(offset) == 0) {
			io_mapping_unmap_atomic(reloc_page);
			reloc_page =
				io_mapping_map_atomic_wc(ggtt->mappable,
							 offset);
		}

		iowrite32(upper_32_bits(delta),
			  reloc_page + offset_in_page(offset));
	}

	io_mapping_unmap_atomic(reloc_page);

	return 0;
}

static void
clflush_write32(void *addr, uint32_t value)
{
	/* This is not a fast path, so KISS. */
	drm_clflush_virt_range(addr, sizeof(uint32_t));
	*(uint32_t *)addr = value;
	drm_clflush_virt_range(addr, sizeof(uint32_t));
}

static int
relocate_entry_clflush(struct drm_i915_gem_object *obj,
		       struct drm_i915_gem_relocation_entry *reloc,
		       uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	uint64_t delta = relocation_target(reloc, target_offset);
	char *vaddr;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
				reloc->offset >> PAGE_SHIFT));
	clflush_write32(vaddr + page_offset, lower_32_bits(delta));

	if (INTEL_INFO(dev)->gen >= 8) {
		page_offset = offset_in_page(page_offset + sizeof(uint32_t));

		if (page_offset == 0) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
		}

		clflush_write32(vaddr + page_offset, upper_32_bits(delta));
	}

	kunmap_atomic(vaddr);

	return 0;
}

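/*
 * Apply a single relocation: validate the target handle and GPU domains,
 * skip the write when presumed_offset already matches, then patch the
 * batch via the cheapest method available and update presumed_offset.
 */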
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_vmas *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint64_t target_offset;
	int ret;

	/* we already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = gen8_canonical_addr(target_vma->node.start);

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
				    PIN_GLOBAL);
		if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
			return ret;
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return -EINVAL;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && pagefault_disabled())
		return -EFAULT;

	if (use_cpu_reloc(obj))
		ret = relocate_entry_cpu(obj, reloc, target_offset);
	else if (obj->map_and_fenceable)
		ret = relocate_entry_gtt(obj, reloc, target_offset);
	else if (static_cpu_has(X86_FEATURE_CLFLUSH))
		ret = relocate_entry_clflush(obj, reloc, target_offset);
	else {
		WARN_ONCE(1, "Impossible case in relocation handling\n");
		ret = -ENODEV;
	}

	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
				 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int remain, ret;

	user_relocs = u64_to_user_ptr(entry->relocs_ptr);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __put_user(r->presumed_offset, &user_relocs->presumed_offset)) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
				      struct eb_vmas *eb,
				      struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
	struct i915_vma *vma;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. In such a case the page fault
	 * handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(vma, &eb->vmas, exec_list) {
		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

static bool only_mappable_for_reloc(unsigned int flags)
{
	return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
		__EXEC_OBJECT_NEEDS_MAP;
}

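/*
 * Pin one vma for execbuf, translating the exec object flags into pin
 * flags (GTT, mappable, 48b, fixed offset) and grabbing a fence when the
 * entry needs one.  *need_reloc is set if the object moved to a new offset.
 */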
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
				struct intel_engine_cs *engine,
				bool *need_reloc)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	uint64_t flags;
	int ret;

	flags = PIN_USER;
	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
		flags |= PIN_GLOBAL;

	if (!drm_mm_node_allocated(&vma->node)) {
		/* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
		 * limit address to the first 4GBs for unflagged objects.
		 */
		if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
			flags |= PIN_ZONE_4G;
		if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
			flags |= PIN_GLOBAL | PIN_MAPPABLE;
		if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
			flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
		if (entry->flags & EXEC_OBJECT_PINNED)
			flags |= entry->offset | PIN_OFFSET_FIXED;
		if ((flags & PIN_MAPPABLE) == 0)
			flags |= PIN_HIGH;
	}

	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
	if ((ret == -ENOSPC  || ret == -E2BIG) &&
	    only_mappable_for_reloc(entry->flags))
		ret = i915_gem_object_pin(obj, vma->vm,
					  entry->alignment,
					  flags & ~PIN_MAPPABLE);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
		ret = i915_gem_object_get_fence(obj);
		if (ret)
			return ret;

		if (i915_gem_object_pin_fence(obj))
			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	return 0;
}

static bool
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	if (entry->relocation_count == 0)
		return false;

	if (!vma->is_ggtt)
		return false;

	/* See also use_cpu_reloc() */
	if (HAS_LLC(vma->obj->base.dev))
		return false;

	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	return true;
}

static bool
eb_vma_misplaced(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	struct drm_i915_gem_object *obj = vma->obj;

	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt);

	if (entry->alignment &&
	    vma->node.start & (entry->alignment - 1))
		return true;

	if (entry->flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	/* avoid costly ping-pong once a batch bo ended up non-mappable */
	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
		return !only_mappable_for_reloc(entry->flags);

	if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	return false;
}

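/*
 * Reserve (pin) every vma on the execbuf list.  Objects that need the
 * mappable aperture or a fence are placed first, user-pinned objects
 * last; on -ENOSPC the whole vm is evicted once before giving up.
 */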
static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
			    struct list_head *vmas,
			    struct i915_gem_context *ctx,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct i915_address_space *vm;
	struct list_head ordered_vmas;
	struct list_head pinned_vmas;
	bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
	int retry;

	i915_gem_retire_requests_ring(engine);

	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

	INIT_LIST_HEAD(&ordered_vmas);
	INIT_LIST_HEAD(&pinned_vmas);
	while (!list_empty(vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(vmas, struct i915_vma, exec_list);
		obj = vma->obj;
		entry = vma->exec_entry;

		if (ctx->flags & CONTEXT_NO_ZEROMAP)
			entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

		if (!has_fenced_gpu_access)
			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
		need_fence =
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (entry->flags & EXEC_OBJECT_PINNED)
			list_move_tail(&vma->exec_list, &pinned_vmas);
		else if (need_mappable) {
			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
			list_move(&vma->exec_list, &ordered_vmas);
		} else
			list_move_tail(&vma->exec_list, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
	}
	list_splice(&ordered_vmas, vmas);
	list_splice(&pinned_vmas, vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (eb_vma_misplaced(vma))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma,
								      engine,
								      need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, vmas, exec_list) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_gem_execbuffer_reserve_vma(vma, engine,
							      need_relocs);
			if (ret)
				goto err;
		}

err:
		if (ret != -ENOSPC || retry++)
			return ret;

		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);
}

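/*
 * Slow path: drop struct_mutex, copy the complete relocation lists from
 * userspace into a kernel buffer, retake the lock, re-acquire the objects
 * and apply the relocations from that copy while holding the lock.
 */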
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_engine_cs *engine,
				  struct eb_vmas *eb,
				  struct drm_i915_gem_exec_object2 *exec,
				  struct i915_gem_context *ctx)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	unsigned count = args->buffer_count;

	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->vmas)) {
		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = u64_to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (__copy_to_user(&user_relocs[j].presumed_offset,
					   &invalid_offset,
					   sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
					  &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		int offset = vma->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
							    reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

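/*
 * Flush every object into the GPU domain before execution: sync against
 * other engines, clflush anything still dirty in the CPU domain, then
 * invalidate the ring's caches so the batch sees coherent data.
 */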
static int
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
				struct list_head *vmas)
{
	const unsigned other_rings = ~intel_engine_flag(req->engine);
	struct i915_vma *vma;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		if (obj->active & other_rings) {
			ret = i915_gem_object_sync(obj, req->engine, &req);
			if (ret)
				return ret;
		}

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			i915_gem_clflush_object(obj, false);
	}

	/* Unconditionally flush any chipset caches (for streaming writes). */
	i915_gem_chipset_flush(req->engine->i915);

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(req);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	/* Kernel clipping was a DRI1 misfeature */
	if (exec->num_cliprects || exec->cliprects_ptr)
		return false;

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return false;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return false;

	return true;
}

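/*
 * Sanity-check the user-supplied exec object array before taking any
 * locks: object flags, alignment, pinned offsets, relocation counts
 * (guarding against overflow) and writability of the relocation arrays.
 */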
static int
validate_exec_list(struct drm_device *dev,
		   struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
	unsigned invalid_flags;
	int i;

	invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	if (USES_FULL_PPGTT(dev))
		invalid_flags |= EXEC_OBJECT_NEEDS_GTT;

	for (i = 0; i < count; i++) {
		char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & invalid_flags)
			return -EINVAL;

		/* Offset can be used as input (EXEC_OBJECT_PINNED), reject
		 * any non-page-aligned or non-canonical addresses.
		 */
		if (exec[i].flags & EXEC_OBJECT_PINNED) {
			if (exec[i].offset !=
			    gen8_canonical_addr(exec[i].offset & PAGE_MASK))
				return -EINVAL;

			/* From drm_mm perspective address space is continuous,
			 * so from this point we're always using non-canonical
			 * form internally.
			 */
			exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
		}

		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
			return -EINVAL;

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915.prefault_disable)) {
			if (fault_in_multipages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}

static struct i915_gem_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
			  struct intel_engine_cs *engine, const u32 ctx_id)
{
	struct i915_gem_context *ctx = NULL;
	struct i915_ctx_hang_stats *hs;

	if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
		return ERR_PTR(-EINVAL);

	ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
	if (IS_ERR(ctx))
		return ctx;

	hs = &ctx->hang_stats;
	if (hs->banned) {
		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
		return ERR_PTR(-EIO);
	}

	return ctx;
}

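/*
 * Mark every object on the execbuf list as active on this request,
 * updating read/write domains, the last write/fence requests and
 * frontbuffer tracking for any GPU writes.
 */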
void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
				   struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
	struct i915_vma *vma;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
		struct drm_i915_gem_object *obj = vma->obj;
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->dirty = 1; /* be paranoid  */
		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain == 0)
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;

		i915_vma_move_to_active(vma, req);
		if (obj->base.write_domain) {
			i915_gem_request_assign(&obj->last_write_req, req);

			intel_fb_obj_invalidate(obj, ORIGIN_CS);

			/* update for the implicit flush after a batch */
			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
		}
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			i915_gem_request_assign(&obj->last_fenced_req, req);
			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
				struct drm_i915_private *dev_priv = engine->i915;
				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
					       &dev_priv->mm.fence_list);
			}
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

static void
i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
{
	/* Unconditionally force add_request to emit a full flush. */
	params->engine->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	__i915_add_request(params->request, params->batch_obj, true);
}

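/*
 * Reset the four gen7 streamout (SOL) write offset registers to zero, as
 * requested by I915_EXEC_GEN7_SOL_RESET; only valid on the render ring.
 */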
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret, i;

	if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	ret = intel_ring_begin(req, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(engine, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(engine, 0);
	}

	intel_ring_advance(engine);

	return 0;
}

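/*
 * Run the command parser over the userspace batch, copying the accepted
 * commands into a shadow batch object taken from the engine's batch pool.
 * An unhandled chained batch (-EACCES) falls back to the original batch.
 */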
static struct drm_i915_gem_object*
i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
			  struct eb_vmas *eb,
			  struct drm_i915_gem_object *batch_obj,
			  u32 batch_start_offset,
			  u32 batch_len,
			  bool is_master)
{
	struct drm_i915_gem_object *shadow_batch_obj;
	struct i915_vma *vma;
	int ret;

	shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
						   PAGE_ALIGN(batch_len));
	if (IS_ERR(shadow_batch_obj))
		return shadow_batch_obj;

	ret = i915_parse_cmds(engine,
			      batch_obj,
			      shadow_batch_obj,
			      batch_start_offset,
			      batch_len,
			      is_master);
	if (ret)
		goto err;

	ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
	if (ret)
		goto err;

	i915_gem_object_unpin_pages(shadow_batch_obj);

	memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));

	vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
	vma->exec_entry = shadow_exec_entry;
	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
	drm_gem_object_reference(&shadow_batch_obj->base);
	list_add_tail(&vma->exec_list, &eb->vmas);

	shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;

	return shadow_batch_obj;

err:
	i915_gem_object_unpin_pages(shadow_batch_obj);
	if (ret == -EACCES) /* unhandled chained batch */
		return batch_obj;
	else
		return ERR_PTR(ret);
}

int
i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas)
{
	struct drm_device *dev = params->dev;
	struct intel_engine_cs *engine = params->engine;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u64 exec_start, exec_len;
	int instp_mode;
	u32 instp_mask;
	int ret;

	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
	if (ret)
		return ret;

	ret = i915_switch_context(params->request);
	if (ret)
		return ret;

	WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<engine->id),
	     "%s didn't clear reload\n", engine->name);

	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	instp_mask = I915_EXEC_CONSTANTS_MASK;
	switch (instp_mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			return -EINVAL;
		}

		if (instp_mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4) {
				DRM_DEBUG("no rel constants on pre-gen4\n");
				return -EINVAL;
			}

			if (INTEL_INFO(dev)->gen > 5 &&
			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
				return -EINVAL;
			}

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
		return -EINVAL;
	}

	if (engine == &dev_priv->engine[RCS] &&
	    instp_mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(params->request, 4);
		if (ret)
			return ret;

		intel_ring_emit(engine, MI_NOOP);
		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(engine, INSTPM);
		intel_ring_emit(engine, instp_mask << 16 | instp_mode);
		intel_ring_advance(engine);

		dev_priv->relative_constants_mode = instp_mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, params->request);
		if (ret)
			return ret;
	}

	exec_len   = args->batch_len;
	exec_start = params->batch_obj_vm_offset +
		     params->args_batch_start_offset;

	if (exec_len == 0)
		exec_len = params->batch_obj->base.size;

	ret = engine->dispatch_execbuffer(params->request,
					exec_start, exec_len,
					params->dispatch_flags);
	if (ret)
		return ret;

	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);

	i915_gem_execbuffer_move_to_active(vmas, params->request);

	return 0;
}

/**
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The ring index is returned.
 */
static unsigned int
gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_ring < 0) {
		/* If not, use the ping-pong mechanism to select one. */
		mutex_lock(&dev_priv->drm.struct_mutex);
		file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
		dev_priv->mm.bsd_ring_dispatch_index ^= 1;
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	return file_priv->bsd_ring;
}

static struct drm_i915_gem_object *
eb_get_batch(struct eb_vmas *eb)
{
	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
		vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return vma->obj;
}

#define I915_USER_RINGS (4)

static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
	[I915_EXEC_DEFAULT]	= RCS,
	[I915_EXEC_RENDER]	= RCS,
	[I915_EXEC_BLT]		= BCS,
	[I915_EXEC_BSD]		= VCS,
	[I915_EXEC_VEBOX]	= VECS
};

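/*
 * Map the execbuf ring selector onto an engine, including the gen8+
 * selection (explicit or ping-pong) between the two BSD video engines.
 */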
static int
eb_select_ring(struct drm_i915_private *dev_priv,
	       struct drm_file *file,
	       struct drm_i915_gem_execbuffer2 *args,
	       struct intel_engine_cs **ring)
{
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;

	if (user_ring_id > I915_USER_RINGS) {
		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
		return -EINVAL;
	}

	if ((user_ring_id != I915_EXEC_BSD) &&
	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
		DRM_DEBUG("execbuf with non bsd ring but with invalid "
			  "bsd dispatch flags: %d\n", (int)(args->flags));
		return -EINVAL;
	}

	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
			bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file);
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
			bsd_idx--;
		} else {
			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
				  bsd_idx);
			return -EINVAL;
		}

		*ring = &dev_priv->engine[_VCS(bsd_idx)];
	} else {
		*ring = &dev_priv->engine[user_ring_map[user_ring_id]];
	}

	if (!intel_engine_initialized(*ring)) {
		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
		return -EINVAL;
	}

	return 0;
}

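/*
 * Main execbuffer path shared by both ioctls: validate the request, look
 * up the context and objects, reserve and relocate them, optionally run
 * the command parser, then hand the request to the submission backend.
 */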
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_request *req = NULL;
	struct eb_vmas *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_i915_gem_exec_object2 shadow_exec_entry;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	struct i915_address_space *vm;
	struct i915_execbuffer_params params_master; /* XXX: will be removed later */
	struct i915_execbuffer_params *params = &params_master;
	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 dispatch_flags;
	int ret;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(dev, exec, args->buffer_count);
	if (ret)
		return ret;

	dispatch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
		    return -EPERM;

		dispatch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		dispatch_flags |= I915_DISPATCH_PINNED;

	ret = eb_select_ring(dev_priv, file, args, &engine);
	if (ret)
		return ret;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
		if (!HAS_RESOURCE_STREAMER(dev)) {
			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
			return -EINVAL;
		}
		if (engine->id != RCS) {
			DRM_DEBUG("RS is not available on %s\n",
				 engine->name);
			return -EINVAL;
		}

		dispatch_flags |= I915_DISPATCH_RS;
	}

	/* Take a local wakeref for preparing to dispatch the execbuf as
	 * we expect to access the hardware fairly frequently in the
	 * process. Upon first dispatch, we acquire another prolonged
	 * wakeref that we hold until the GPU has been idle for at least
	 * 100ms.
	 */
	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		ret = PTR_ERR(ctx);
		goto pre_mutex_err;
	}

	i915_gem_context_reference(ctx);

	if (ctx->ppgtt)
		vm = &ctx->ppgtt->base;
	else
		vm = &ggtt->base;

	memset(&params_master, 0x00, sizeof(params_master));

	eb = eb_create(args);
	if (eb == NULL) {
		i915_gem_context_unreference(ctx);
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = eb_get_batch(eb);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
					  &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
								engine,
								eb, exec, ctx);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}

	params->args_batch_start_offset = args->batch_start_offset;
	if (i915_needs_cmd_parser(engine) && args->batch_len) {
		struct drm_i915_gem_object *parsed_batch_obj;

		parsed_batch_obj = i915_gem_execbuffer_parse(engine,
							     &shadow_exec_entry,
							     eb,
							     batch_obj,
							     args->batch_start_offset,
							     args->batch_len,
							     drm_is_current_master(file));
		if (IS_ERR(parsed_batch_obj)) {
			ret = PTR_ERR(parsed_batch_obj);
			goto err;
		}

		/*
		 * parsed_batch_obj == batch_obj means batch not fully parsed:
		 * Accept, but don't promote to secure.
		 */

		if (parsed_batch_obj != batch_obj) {
			/*
			 * Batch parsed and accepted:
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
			 * bit from MI_BATCH_BUFFER_START commands issued in
			 * the dispatch_execbuffer implementations. We
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
			dispatch_flags |= I915_DISPATCH_SECURE;
			params->args_batch_start_offset = 0;
			batch_obj = parsed_batch_obj;
		}
	}

	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (dispatch_flags & I915_DISPATCH_SECURE) {
		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
		if (ret)
			goto err;

		params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
	} else
		params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);

	/* Allocate a request for this batch buffer nice and early. */
	req = i915_gem_request_alloc(engine, ctx);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto err_batch_unpin;
	}

	ret = i915_gem_request_add_to_client(req, file);
	if (ret)
		goto err_request;

	/*
	 * Save assorted stuff away to pass through to *_submission().
	 * NB: This data should be 'persistent' and not local as it will
	 * kept around beyond the duration of the IOCTL once the GPU
	 * scheduler arrives.
	 */
	params->dev                     = dev;
	params->file                    = file;
	params->engine                    = engine;
	params->dispatch_flags          = dispatch_flags;
	params->batch_obj               = batch_obj;
	params->ctx                     = ctx;
	params->request                 = req;

	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
err_request:
	i915_gem_execbuffer_retire_commands(params);

err_batch_unpin:
	/*
	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
	 * batch vma for correctness. For less ugly and less fragility this
	 * needs to be adjusted to also track the ggtt batch vma properly as
	 * active.
	 */
	if (dispatch_flags & I915_DISPATCH_SECURE)
		i915_gem_object_ggtt_unpin(batch_obj);

err:
	/* the request owns the ref now */
	i915_gem_context_unreference(ctx);
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	/* intel_gpu_busy should also get a ref, so it will free when the device
	 * is really idle. */
	intel_runtime_pm_put(dev_priv);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
1720
	i915_execbuffer2_set_context_id(exec2, 0);
1721

1722
	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1723
	if (!ret) {
1724
		struct drm_i915_gem_exec_object __user *user_exec_list =
1725
			u64_to_user_ptr(args->buffers_ptr);
1726

1727
		/* Copy the new buffer offsets back to the user's exec list. */
1728
		for (i = 0; i < args->buffer_count; i++) {
1729 1730
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
1731 1732 1733 1734 1735 1736 1737 1738 1739 1740
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user (%d)\n",
					  args->buffer_count, ret);
				break;
			}
1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

1757 1758
	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
1759
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
1760 1761 1762
		return -EINVAL;
	}

	if (args->rsvd2 != 0) {
		DRM_DEBUG("dirty rvsd2 field\n");
		return -EINVAL;
	}

	exec2_list = drm_malloc_gfp(args->buffer_count,
				    sizeof(*exec2_list),
				    GFP_TEMPORARY);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
				   u64_to_user_ptr(args->buffers_ptr);
		int i;

		for (i = 0; i < args->buffer_count; i++) {
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user\n",
					  args->buffer_count);
				break;
			}
		}
	}

	drm_free_large(exec2_list);
	return ret;
}