/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>
#include <linux/uaccess.h>

#define  __EXEC_OBJECT_HAS_PIN (1<<31)
#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
#define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
#define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)

#define BATCH_OFFSET_BIAS (256*1024)

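/*
 * Collection of vmas referenced by a single execbuffer call. Small
 * I915_EXEC_HANDLE_LUT batches index the flat lookup table directly;
 * otherwise handles are mapped through a small hash table (buckets).
 */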
struct eb_vmas {
	struct list_head vmas;
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};

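/*
 * Allocate the per-execbuf vma tracking structure: a flat table when
 * userspace passes dense handles (I915_EXEC_HANDLE_LUT), otherwise a
 * hash table sized against the buffer count.
 */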
static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}

static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

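/*
 * Resolve each userspace handle to an object under the file's table_lock,
 * then look up or create a vma for it in the target address space. On
 * success, ownership of the object references moves onto eb->vmas.
 */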
static int
eb_lookup_vmas(struct eb_vmas *eb,
	       struct drm_i915_gem_exec_object2 *exec,
	       const struct drm_i915_gem_execbuffer2 *args,
	       struct i915_address_space *vm,
	       struct drm_file *file)
{
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret;

	INIT_LIST_HEAD(&objects);
	spin_lock(&file->table_lock);
	/* Grab a reference to the object and release the lock so we can lookup
	 * or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				   exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				   obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&file->table_lock);

	i = 0;
	while (!list_empty(&objects)) {
		struct i915_vma *vma;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
		if (IS_ERR(vma)) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto err;
		}

		/* Transfer ownership from the objects list to the vmas list. */
		list_add_tail(&vma->exec_list, &eb->vmas);
		list_del_init(&obj->obj_exec_link);

		vma->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}

	return 0;

err:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		drm_gem_object_unreference(&obj->base);
	}
	/*
	 * Objects already transferred to the vmas list will be unreferenced by
	 * eb_destroy.
	 */

	return ret;
}

static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct i915_vma *vma;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each_entry(vma, head, exec_node) {
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry;
	struct drm_i915_gem_object *obj = vma->obj;

	if (!drm_mm_node_allocated(&vma->node))
		return;

	entry = vma->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		vma->pin_count--;

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void eb_destroy(struct eb_vmas *eb)
{
	while (!list_empty(&eb->vmas)) {
		struct i915_vma *vma;

		vma = list_first_entry(&eb->vmas,
				       struct i915_vma,
				       exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}
	kfree(eb);
}

static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (HAS_LLC(obj->base.dev) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		obj->cache_level != I915_CACHE_NONE);
}

/* Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
#define GEN8_HIGH_ADDRESS_BIT 47
static inline uint64_t gen8_canonical_addr(uint64_t address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline uint64_t gen8_noncanonical_addr(uint64_t address)
{
	return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
}

static inline uint64_t
relocation_target(struct drm_i915_gem_relocation_entry *reloc,
		  uint64_t target_offset)
{
	return gen8_canonical_addr((int)reloc->delta + target_offset);
}

static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	uint64_t delta = relocation_target(reloc, target_offset);
	char *vaddr;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
				reloc->offset >> PAGE_SHIFT));
	*(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);

	if (INTEL_INFO(dev)->gen >= 8) {
		page_offset = offset_in_page(page_offset + sizeof(uint32_t));

		if (page_offset == 0) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
		}

		*(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
	}

	kunmap_atomic(vaddr);

	return 0;
}

static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint64_t delta = relocation_target(reloc, target_offset);
	uint64_t offset;
	void __iomem *reloc_page;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	/* Map the page containing the relocation we're going to perform.  */
	offset = i915_gem_obj_ggtt_offset(obj);
	offset += reloc->offset;
	reloc_page = io_mapping_map_atomic_wc(ggtt->mappable,
					      offset & PAGE_MASK);
	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));

	if (INTEL_INFO(dev)->gen >= 8) {
		offset += sizeof(uint32_t);

		if (offset_in_page(offset) == 0) {
			io_mapping_unmap_atomic(reloc_page);
			reloc_page =
				io_mapping_map_atomic_wc(ggtt->mappable,
							 offset);
		}

		iowrite32(upper_32_bits(delta),
			  reloc_page + offset_in_page(offset));
	}

	io_mapping_unmap_atomic(reloc_page);

	return 0;
}

static void
clflush_write32(void *addr, uint32_t value)
{
	/* This is not a fast path, so KISS. */
	drm_clflush_virt_range(addr, sizeof(uint32_t));
	*(uint32_t *)addr = value;
	drm_clflush_virt_range(addr, sizeof(uint32_t));
}

static int
relocate_entry_clflush(struct drm_i915_gem_object *obj,
		       struct drm_i915_gem_relocation_entry *reloc,
		       uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	uint64_t delta = relocation_target(reloc, target_offset);
	char *vaddr;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
				reloc->offset >> PAGE_SHIFT));
	clflush_write32(vaddr + page_offset, lower_32_bits(delta));

	if (INTEL_INFO(dev)->gen >= 8) {
		page_offset = offset_in_page(page_offset + sizeof(uint32_t));

		if (page_offset == 0) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
		}

		clflush_write32(vaddr + page_offset, upper_32_bits(delta));
	}

	kunmap_atomic(vaddr);

	return 0;
}

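/*
 * Apply a single relocation: validate the target handle and GPU domains,
 * then write the new presumed address through the CPU, GTT or clflush
 * path depending on coherency and mappability.
 */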
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_vmas *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint64_t target_offset;
	int ret;

	/* we already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = gen8_canonical_addr(target_vma->node.start);

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
				    PIN_GLOBAL);
		if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
			return ret;
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return -EINVAL;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && pagefault_disabled())
		return -EFAULT;

	if (use_cpu_reloc(obj))
		ret = relocate_entry_cpu(obj, reloc, target_offset);
	else if (obj->map_and_fenceable)
		ret = relocate_entry_gtt(obj, reloc, target_offset);
	else if (cpu_has_clflush)
		ret = relocate_entry_clflush(obj, reloc, target_offset);
	else {
		WARN_ONCE(1, "Impossible case in relocation handling\n");
		ret = -ENODEV;
	}

	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
				 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int remain, ret;

	user_relocs = to_user_ptr(entry->relocs_ptr);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
				      struct eb_vmas *eb,
				      struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}

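/* Fast path: apply all relocations in one pass with pagefaults disabled. */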
static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
	struct i915_vma *vma;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. In such a case the page fault
	 * handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(vma, &eb->vmas, exec_list) {
		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

static bool only_mappable_for_reloc(unsigned int flags)
{
	return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
		__EXEC_OBJECT_NEEDS_MAP;
}

static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
				struct intel_engine_cs *engine,
				bool *need_reloc)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	uint64_t flags;
	int ret;

	flags = PIN_USER;
	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
		flags |= PIN_GLOBAL;

	if (!drm_mm_node_allocated(&vma->node)) {
		/* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
		 * limit address to the first 4GBs for unflagged objects.
		 */
		if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
			flags |= PIN_ZONE_4G;
		if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
			flags |= PIN_GLOBAL | PIN_MAPPABLE;
		if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
			flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
		if (entry->flags & EXEC_OBJECT_PINNED)
			flags |= entry->offset | PIN_OFFSET_FIXED;
		if ((flags & PIN_MAPPABLE) == 0)
			flags |= PIN_HIGH;
	}

	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
	if ((ret == -ENOSPC || ret == -E2BIG) &&
	    only_mappable_for_reloc(entry->flags))
		ret = i915_gem_object_pin(obj, vma->vm,
					  entry->alignment,
					  flags & ~PIN_MAPPABLE);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
		ret = i915_gem_object_get_fence(obj);
		if (ret)
			return ret;

		if (i915_gem_object_pin_fence(obj))
			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	return 0;
}

static bool
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	if (entry->relocation_count == 0)
		return false;

	if (!vma->is_ggtt)
		return false;

	/* See also use_cpu_reloc() */
	if (HAS_LLC(vma->obj->base.dev))
		return false;

	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	return true;
}

static bool
eb_vma_misplaced(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	struct drm_i915_gem_object *obj = vma->obj;

	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt);

	if (entry->alignment &&
	    vma->node.start & (entry->alignment - 1))
		return true;

	if (entry->flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	/* avoid costly ping-pong once a batch bo ended up non-mappable */
	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
		return !only_mappable_for_reloc(entry->flags);

	if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	return false;
}

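/*
 * Pin all execbuf objects into the address space, sorting pinned and
 * mappable objects first; if the initial passes cannot fit everything,
 * unreserve and evict the vm before retrying.
 */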
static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
			    struct list_head *vmas,
			    struct intel_context *ctx,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct i915_address_space *vm;
	struct list_head ordered_vmas;
	struct list_head pinned_vmas;
	bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4;
	int retry;

	i915_gem_retire_requests_ring(engine);

	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

	INIT_LIST_HEAD(&ordered_vmas);
	INIT_LIST_HEAD(&pinned_vmas);
	while (!list_empty(vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(vmas, struct i915_vma, exec_list);
		obj = vma->obj;
		entry = vma->exec_entry;

		if (ctx->flags & CONTEXT_NO_ZEROMAP)
			entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

		if (!has_fenced_gpu_access)
			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
		need_fence =
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (entry->flags & EXEC_OBJECT_PINNED)
			list_move_tail(&vma->exec_list, &pinned_vmas);
		else if (need_mappable) {
			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
			list_move(&vma->exec_list, &ordered_vmas);
		} else
			list_move_tail(&vma->exec_list, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
	}
	list_splice(&ordered_vmas, vmas);
	list_splice(&pinned_vmas, vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (eb_vma_misplaced(vma))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma,
								      engine,
								      need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, vmas, exec_list) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_gem_execbuffer_reserve_vma(vma, engine,
							      need_relocs);
			if (ret)
				goto err;
		}

err:
		if (ret != -ENOSPC || retry++)
			return ret;

		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);
}

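/*
 * Slow path: drop struct_mutex, copy all relocations from userspace with
 * pagefaults enabled, then reacquire the lock and the objects and apply
 * the relocations from the kernel copy.
 */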
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_engine_cs *engine,
				  struct eb_vmas *eb,
				  struct drm_i915_gem_exec_object2 *exec,
				  struct intel_context *ctx)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	unsigned count = args->buffer_count;

	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->vmas)) {
		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (__copy_to_user(&user_relocs[j].presumed_offset,
					   &invalid_offset,
					   sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
					  &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		int offset = vma->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
							    reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

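/*
 * Flush and synchronize every object for use on this engine, then
 * invalidate the GPU caches before the batch runs.
 */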
static int
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
				struct list_head *vmas)
{
	const unsigned other_rings = ~intel_engine_flag(req->engine);
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		if (obj->active & other_rings) {
			ret = i915_gem_object_sync(obj, req->engine, &req);
			if (ret)
				return ret;
		}

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_chipset)
		i915_gem_chipset_flush(req->engine->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(req);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	/* Kernel clipping was a DRI1 misfeature */
	if (exec->num_cliprects || exec->cliprects_ptr)
		return false;

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return false;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return false;

	return true;
}

static int
validate_exec_list(struct drm_device *dev,
		   struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
	unsigned invalid_flags;
	int i;

	invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	if (USES_FULL_PPGTT(dev))
		invalid_flags |= EXEC_OBJECT_NEEDS_GTT;

	for (i = 0; i < count; i++) {
		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & invalid_flags)
			return -EINVAL;

		/* Offset can be used as input (EXEC_OBJECT_PINNED), reject
		 * any non-page-aligned or non-canonical addresses.
		 */
		if (exec[i].flags & EXEC_OBJECT_PINNED) {
			if (exec[i].offset !=
			    gen8_canonical_addr(exec[i].offset & PAGE_MASK))
				return -EINVAL;

			/* From drm_mm perspective address space is continuous,
			 * so from this point we're always using non-canonical
			 * form internally.
			 */
			exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
		}

		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
			return -EINVAL;

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915.prefault_disable)) {
			if (fault_in_multipages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}

static struct intel_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
			  struct intel_engine_cs *engine, const u32 ctx_id)
{
	struct intel_context *ctx = NULL;
	struct i915_ctx_hang_stats *hs;

	if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
		return ERR_PTR(-EINVAL);

	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
	if (IS_ERR(ctx))
		return ctx;

	hs = &ctx->hang_stats;
	if (hs->banned) {
		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
		return ERR_PTR(-EIO);
	}

	return ctx;
}

void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
				   struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
	struct i915_vma *vma;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
		struct drm_i915_gem_object *obj = vma->obj;
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->dirty = 1; /* be paranoid */
		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain == 0)
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;

		i915_vma_move_to_active(vma, req);
		if (obj->base.write_domain) {
			i915_gem_request_assign(&obj->last_write_req, req);

			intel_fb_obj_invalidate(obj, ORIGIN_CS);

			/* update for the implicit flush after a batch */
			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
		}
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			i915_gem_request_assign(&obj->last_fenced_req, req);
			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
				struct drm_i915_private *dev_priv = to_i915(engine->dev);
				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
					       &dev_priv->mm.fence_list);
			}
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

static void
i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
{
	/* Unconditionally force add_request to emit a full flush. */
	params->engine->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	__i915_add_request(params->request, params->batch_obj, true);
}

static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	ret = intel_ring_begin(req, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(engine, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(engine, 0);
	}

	intel_ring_advance(engine);

	return 0;
}

static struct drm_i915_gem_object*
i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
			  struct eb_vmas *eb,
			  struct drm_i915_gem_object *batch_obj,
			  u32 batch_start_offset,
			  u32 batch_len,
			  bool is_master)
{
	struct drm_i915_gem_object *shadow_batch_obj;
	struct i915_vma *vma;
	int ret;

	shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
						   PAGE_ALIGN(batch_len));
	if (IS_ERR(shadow_batch_obj))
		return shadow_batch_obj;

	ret = i915_parse_cmds(engine,
			      batch_obj,
			      shadow_batch_obj,
			      batch_start_offset,
			      batch_len,
			      is_master);
	if (ret)
		goto err;

	ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
	if (ret)
		goto err;

	i915_gem_object_unpin_pages(shadow_batch_obj);

	memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));

	vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
	vma->exec_entry = shadow_exec_entry;
	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
	drm_gem_object_reference(&shadow_batch_obj->base);
	list_add_tail(&vma->exec_list, &eb->vmas);

	shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;

	return shadow_batch_obj;

err:
	i915_gem_object_unpin_pages(shadow_batch_obj);
	if (ret == -EACCES) /* unhandled chained batch */
		return batch_obj;
	else
		return ERR_PTR(ret);
}

int
i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas)
{
	struct drm_device *dev = params->dev;
	struct intel_engine_cs *engine = params->engine;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 exec_start, exec_len;
	int instp_mode;
	u32 instp_mask;
	int ret;

	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
	if (ret)
		return ret;

	ret = i915_switch_context(params->request);
	if (ret)
		return ret;

	WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<engine->id),
	     "%s didn't clear reload\n", engine->name);

	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	instp_mask = I915_EXEC_CONSTANTS_MASK;
	switch (instp_mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			return -EINVAL;
		}

		if (instp_mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4) {
				DRM_DEBUG("no rel constants on pre-gen4\n");
				return -EINVAL;
			}

			if (INTEL_INFO(dev)->gen > 5 &&
			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
				return -EINVAL;
			}

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
		return -EINVAL;
	}

	if (engine == &dev_priv->engine[RCS] &&
	    instp_mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(params->request, 4);
		if (ret)
			return ret;

		intel_ring_emit(engine, MI_NOOP);
		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(engine, INSTPM);
		intel_ring_emit(engine, instp_mask << 16 | instp_mode);
		intel_ring_advance(engine);

		dev_priv->relative_constants_mode = instp_mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, params->request);
		if (ret)
			return ret;
	}

	exec_len   = args->batch_len;
	exec_start = params->batch_obj_vm_offset +
		     params->args_batch_start_offset;

	if (exec_len == 0)
		exec_len = params->batch_obj->base.size;

	ret = engine->dispatch_execbuffer(params->request,
					exec_start, exec_len,
					params->dispatch_flags);
	if (ret)
		return ret;

	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);

	i915_gem_execbuffer_move_to_active(vmas, params->request);

	return 0;
}

/**
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The ring index is returned.
 */
static unsigned int
gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_ring < 0) {
		/* If not, use the ping-pong mechanism to select one. */
		mutex_lock(&dev_priv->dev->struct_mutex);
		file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
		dev_priv->mm.bsd_ring_dispatch_index ^= 1;
		mutex_unlock(&dev_priv->dev->struct_mutex);
	}

	return file_priv->bsd_ring;
}

static struct drm_i915_gem_object *
eb_get_batch(struct eb_vmas *eb)
{
	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
		vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return vma->obj;
}

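/* Mapping and validation of the user-visible execbuf ring selectors. */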
#define I915_USER_RINGS (4)

static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
	[I915_EXEC_DEFAULT]	= RCS,
	[I915_EXEC_RENDER]	= RCS,
	[I915_EXEC_BLT]		= BCS,
	[I915_EXEC_BSD]		= VCS,
	[I915_EXEC_VEBOX]	= VECS
};

static int
eb_select_ring(struct drm_i915_private *dev_priv,
	       struct drm_file *file,
	       struct drm_i915_gem_execbuffer2 *args,
	       struct intel_engine_cs **ring)
{
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;

	if (user_ring_id > I915_USER_RINGS) {
		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
		return -EINVAL;
	}

	if ((user_ring_id != I915_EXEC_BSD) &&
	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
		DRM_DEBUG("execbuf with non bsd ring but with invalid "
			  "bsd dispatch flags: %d\n", (int)(args->flags));
		return -EINVAL;
	}

	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
			bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file);
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
			bsd_idx--;
		} else {
			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
				  bsd_idx);
			return -EINVAL;
		}

		*ring = &dev_priv->engine[_VCS(bsd_idx)];
	} else {
		*ring = &dev_priv->engine[user_ring_map[user_ring_id]];
	}

	if (!intel_engine_initialized(*ring)) {
		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
		return -EINVAL;
	}

	return 0;
}

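/*
 * Core execbuf path shared by both ioctls: validate the arguments, look
 * up the objects, reserve and relocate, optionally run the command parser
 * on the batch, then hand the request to the backend for submission.
 */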
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_request *req = NULL;
	struct eb_vmas *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_i915_gem_exec_object2 shadow_exec_entry;
	struct intel_engine_cs *engine;
	struct intel_context *ctx;
	struct i915_address_space *vm;
	struct i915_execbuffer_params params_master; /* XXX: will be removed later */
	struct i915_execbuffer_params *params = &params_master;
	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 dispatch_flags;
	int ret;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(dev, exec, args->buffer_count);
	if (ret)
		return ret;

	dispatch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!file->is_master || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		dispatch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		dispatch_flags |= I915_DISPATCH_PINNED;

	ret = eb_select_ring(dev_priv, file, args, &engine);
	if (ret)
		return ret;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
		if (!HAS_RESOURCE_STREAMER(dev)) {
			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
			return -EINVAL;
		}
		if (engine->id != RCS) {
			DRM_DEBUG("RS is not available on %s\n",
				  engine->name);
			return -EINVAL;
		}

		dispatch_flags |= I915_DISPATCH_RS;
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		ret = PTR_ERR(ctx);
		goto pre_mutex_err;
	}

	i915_gem_context_reference(ctx);

	if (ctx->ppgtt)
		vm = &ctx->ppgtt->base;
	else
		vm = &ggtt->base;

	memset(&params_master, 0x00, sizeof(params_master));

	eb = eb_create(args);
	if (eb == NULL) {
		i915_gem_context_unreference(ctx);
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = eb_get_batch(eb);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
					  &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
								engine,
								eb, exec, ctx);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}

	params->args_batch_start_offset = args->batch_start_offset;
	if (i915_needs_cmd_parser(engine) && args->batch_len) {
		struct drm_i915_gem_object *parsed_batch_obj;

		parsed_batch_obj = i915_gem_execbuffer_parse(engine,
							     &shadow_exec_entry,
							     eb,
							     batch_obj,
							     args->batch_start_offset,
							     args->batch_len,
							     file->is_master);
		if (IS_ERR(parsed_batch_obj)) {
			ret = PTR_ERR(parsed_batch_obj);
			goto err;
		}

		/*
		 * parsed_batch_obj == batch_obj means batch not fully parsed:
		 * Accept, but don't promote to secure.
		 */

		if (parsed_batch_obj != batch_obj) {
			/*
			 * Batch parsed and accepted:
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
			 * bit from MI_BATCH_BUFFER_START commands issued in
			 * the dispatch_execbuffer implementations. We
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
			dispatch_flags |= I915_DISPATCH_SECURE;
			params->args_batch_start_offset = 0;
			batch_obj = parsed_batch_obj;
		}
	}

	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (dispatch_flags & I915_DISPATCH_SECURE) {
		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
		if (ret)
			goto err;

		params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
	} else
		params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);

	/* Allocate a request for this batch buffer nice and early. */
	req = i915_gem_request_alloc(engine, ctx);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto err_batch_unpin;
	}

	ret = i915_gem_request_add_to_client(req, file);
	if (ret)
		goto err_request;

	/*
	 * Save assorted stuff away to pass through to *_submission().
	 * NB: This data should be 'persistent' and not local as it will be
	 * kept around beyond the duration of the IOCTL once the GPU
	 * scheduler arrives.
	 */
	params->dev                     = dev;
	params->file                    = file;
	params->engine                  = engine;
	params->dispatch_flags          = dispatch_flags;
	params->batch_obj               = batch_obj;
	params->ctx                     = ctx;
	params->request                 = req;

	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
err_request:
	i915_gem_execbuffer_retire_commands(params);

err_batch_unpin:
	/*
	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
	 * batch vma for correctness. For less ugliness and fragility this
	 * needs to be adjusted to also track the ggtt batch vma properly as
	 * active.
	 */
	if (dispatch_flags & I915_DISPATCH_SECURE)
		i915_gem_object_ggtt_unpin(batch_obj);

err:
	/* the request owns the ref now */
	i915_gem_context_unreference(ctx);
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	/* intel_gpu_busy should also get a ref, so it will free when the device
	 * is really idle. */
	intel_runtime_pm_put(dev_priv);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user (%d)\n",
					  args->buffer_count, ret);
				break;
			}
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->rsvd2 != 0) {
		DRM_DEBUG("dirty rsvd2 field\n");
		return -EINVAL;
	}

	exec2_list = drm_malloc_gfp(args->buffer_count,
				    sizeof(*exec2_list),
				    GFP_TEMPORARY);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
				   to_user_ptr(args->buffers_ptr);
		int i;

		for (i = 0; i < args->buffer_count; i++) {
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user\n",
					  args->buffer_count);
				break;
			}
		}
	}

	drm_free_large(exec2_list);
	return ret;
}