i915_gem_execbuffer.c
/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <linux/dma_remapping.h>
#include <linux/reservation.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define DBG_USE_CPU_RELOC 0 /* -1 force GTT relocs; 1 force CPU relocs */

#define __EXEC_OBJECT_HAS_REF		BIT(31)
#define __EXEC_OBJECT_HAS_PIN		BIT(30)
#define __EXEC_OBJECT_HAS_FENCE		BIT(29)
#define __EXEC_OBJECT_NEEDS_MAP		BIT(28)
#define __EXEC_OBJECT_NEEDS_BIAS	BIT(27)
#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 27) /* all of the above */
#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)

#define __EXEC_HAS_RELOC	BIT(31)
#define __EXEC_VALIDATED	BIT(30)
#define UPDATE			PIN_OFFSET_FIXED

#define BATCH_OFFSET_BIAS (256*1024)

#define __I915_EXEC_ILLEGAL_FLAGS \
	(__I915_EXEC_UNKNOWN_FLAGS | I915_EXEC_CONSTANTS_MASK)

/**
 * DOC: User command execution
 *
 * Userspace submits commands to be executed on the GPU as an instruction
 * stream within a GEM object we call a batchbuffer. These instructions may
 * refer to other GEM objects containing auxiliary state such as kernels,
 * samplers, render targets and even secondary batchbuffers. Userspace does
 * not know where in the GPU memory these objects reside and so before the
 * batchbuffer is passed to the GPU for execution, those addresses in the
 * batchbuffer and auxiliary objects are updated. This is known as relocation,
 * or patching. To try and avoid having to relocate each object on the next
 * execution, userspace is told the location of those objects in this pass,
 * but this remains just a hint as the kernel may choose a new location for
 * any object in the future.
 *
 * Processing an execbuf ioctl is conceptually split up into a few phases.
 *
 * 1. Validation - Ensure all the pointers, handles and flags are valid.
 * 2. Reservation - Assign GPU address space for every object
 * 3. Relocation - Update any addresses to point to the final locations
 * 4. Serialisation - Order the request with respect to its dependencies
 * 5. Construction - Construct a request to execute the batchbuffer
 * 6. Submission (at some point in the future execution)
 *
 * Reserving resources for the execbuf is the most complicated phase. We
 * neither want to have to migrate the object in the address space, nor do
 * we want to have to update any relocations pointing to this object. Ideally,
 * we want to leave the object where it is and for all the existing relocations
 * to match. If the object is given a new address, or if userspace thinks the
 * object is elsewhere, we have to parse all the relocation entries and update
 * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
 * all the target addresses in all of its objects match the value in the
 * relocation entries and that they all match the presumed offsets given by the
 * list of execbuffer objects. Using this knowledge, we know that if we haven't
 * moved any buffers, all the relocation entries are valid and we can skip
 * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
 * hang.) The requirements for using I915_EXEC_NO_RELOC are:
 *
 *      The addresses written in the objects must match the corresponding
 *      reloc.presumed_offset which in turn must match the corresponding
 *      execobject.offset.
 *
 *      Any render targets written to in the batch must be flagged with
 *      EXEC_OBJECT_WRITE.
 *
 *      To avoid stalling, execobject.offset should match the current
 *      address of that object within the active context.
 *
 * The reservation is done in multiple phases. First we try to keep any
 * object already bound at its current location - so long as it meets the
 * constraints imposed by the new execbuffer. Any object left unbound after the
 * first pass is then fitted into any available idle space. If an object does
 * not fit, all objects are removed from the reservation and the process rerun
 * after sorting the objects into a priority order (more difficult to fit
 * objects are tried first). Failing that, the entire VM is cleared and we try
 * to fit the execbuf one last time before concluding that it simply will not
 * fit.
 *
 * A small complication to all of this is that we allow userspace not only to
 * specify an alignment and a size for the object in the address space, but
 * we also allow userspace to specify the exact offset. These objects are
 * simpler to place (the location is known a priori); all we have to do is
 * make sure the space is available.
 *
 * Once all the objects are in place, patching up the buried pointers to point
 * to the final locations is a fairly simple job of walking over the relocation
 * entry arrays, looking up the right address and rewriting the value into
 * the object. Simple! ... The relocation entries are stored in user memory
 * and so to access them we have to copy them into a local buffer. That copy
 * has to avoid taking any pagefaults as they may lead back to a GEM object
 * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
 * the relocation into multiple passes. First we try to do everything within an
 * atomic context (avoid the pagefaults) which requires that we never wait. If
 * we detect that we may wait, or if we need to fault, then we have to fall back
 * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
 * bells yet?) Dropping the mutex means that we lose all the state we have
 * built up so far for the execbuf and we must reset any global data. However,
 * we do leave the objects pinned in their final locations - which is a
 * potential issue for concurrent execbufs. Once we have left the mutex, we can
 * allocate and copy all the relocation entries into a large array at our
 * leisure, reacquire the mutex, reclaim all the objects and other state and
 * then proceed to update any incorrect addresses with the objects.
 *
 * As we process the relocation entries, we maintain a record of whether the
 * object is being written to. Using NO_RELOC, we expect userspace to provide
 * this information instead. We also check whether we can skip the relocation
 * by comparing the expected value inside the relocation entry with the target's
 * final address. If they differ, we have to map the current object and rewrite
 * the 4 or 8 byte pointer within.
 *
 * Serialising an execbuf is quite simple according to the rules of the GEM
 * ABI. Execution within each context is ordered by the order of submission.
 * Writes to any GEM object are in order of submission and are exclusive. Reads
 * from a GEM object are unordered with respect to other reads, but ordered by
 * writes. A write submitted after a read cannot occur before the read, and
 * similarly any read submitted after a write cannot occur before the write.
 * Writes are ordered between engines such that only one write occurs at any
 * time (completing any reads beforehand) - using semaphores where available
 * and CPU serialisation otherwise. Other GEM accesses obey the same rules: any
 * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
 * reads before starting, and any read (either using set-domain or pread) must
 * flush all GPU writes before starting. (Note we only employ a barrier before;
 * we currently rely on userspace not concurrently starting a new execution
 * whilst reading or writing to an object. This may be an advantage or not
 * depending on how much you trust userspace not to shoot themselves in the
 * foot.) Serialisation may just result in the request being inserted into
 * a DAG awaiting its turn, but the simplest is to wait on the CPU until
 * all dependencies are resolved.
 *
 * After all of that, it is just a matter of closing the request and handing it to
 * the hardware (well, leaving it in a queue to be executed). However, we also
 * offer the ability for batchbuffers to be run with elevated privileges so
 * that they can access otherwise hidden registers. (Used to adjust L3 cache etc.)
 * Before any batch is given extra privileges we must first check that it
 * contains no nefarious instructions: each instruction must come from our
 * whitelist and all registers must also be from an allowed list. We first
 * copy the user's batchbuffer to a shadow (so that the user doesn't have
 * access to it, either by the CPU or GPU as we scan it) and then parse each
 * instruction. If everything is ok, we set a flag telling the hardware to run
 * the batchbuffer in trusted mode, otherwise the ioctl is rejected.
 */
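
/*
 * Illustrative userspace-side sketch (not part of the driver, compiled out):
 * one way a client might submit a single batch with the NO_RELOC hint
 * described above. The fd, handle and offset values are hypothetical and
 * error handling is elided.
 */
#if 0
static int submit_batch(int fd, __u32 batch_handle, __u64 presumed_offset)
{
	struct drm_i915_gem_exec_object2 exec_obj = {
		.handle = batch_handle,
		.offset = presumed_offset, /* hint from a previous execbuf */
	};
	struct drm_i915_gem_execbuffer2 execbuf = {
		.buffers_ptr = (__u64)(uintptr_t)&exec_obj,
		.buffer_count = 1,
		.batch_len = 4096,
		.rsvd1 = 0, /* default context */
		.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC |
			 I915_EXEC_HANDLE_LUT,
	};

	/* The kernel may still move the object; offsets are only a hint. */
	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}
#endif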

struct i915_execbuffer {
	struct drm_i915_private *i915; /** i915 backpointer */
	struct drm_file *file; /** per-file lookup tables and limits */
	struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
	struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */

	struct intel_engine_cs *engine; /** engine to queue the request to */
	struct i915_gem_context *ctx; /** context for building the request */
	struct i915_address_space *vm; /** GTT and vma for the request */

	struct drm_i915_gem_request *request; /** our request to build */
	struct i915_vma *batch; /** identity of the batch obj/vma */

	/** actual size of execobj[] as we may extend it for the cmdparser */
	unsigned int buffer_count;

	/** list of vma not yet bound during reservation phase */
	struct list_head unbound;

	/** list of vma that have execobj.relocation_count */
	struct list_head relocs;

	/**
	 * Track the most recently used object for relocations, as we
	 * frequently have to perform multiple relocations within the same
	 * obj/page
	 */
	struct reloc_cache {
		struct drm_mm_node node; /** temporary GTT binding */
		unsigned long vaddr; /** Current kmap address */
		unsigned long page; /** Currently mapped page index */
		bool use_64bit_reloc : 1;
		bool has_llc : 1;
		bool has_fence : 1;
		bool needs_unfenced : 1;
	} reloc_cache;

	u64 invalid_flags; /** Set of execobj.flags that are invalid */
	u32 context_flags; /** Set of execobj.flags to insert from the ctx */

	u32 batch_start_offset; /** Location within object of batch */
	u32 batch_len; /** Length of batch within object */
	u32 batch_flags; /** Flags composed for emit_bb_start() */

	/**
	 * Indicate either the size of the hashtable used to resolve
	 * relocation handles, or if negative that we are using a direct
	 * index into the execobj[].
	 */
	int lut_size;
	struct hlist_head *buckets; /** ht for relocation handles */
};

/*
 * As an alternative to creating a hashtable of handle-to-vma for a batch,
 * we use the last available reserved field in the execobject[] and stash
 * a link from the execobj to its vma.
 */
#define __exec_to_vma(ee) (ee)->rsvd2
#define exec_to_vma(ee) u64_to_ptr(struct i915_vma, __exec_to_vma(ee))
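/* Usage sketch: once eb_add_vma() has stashed the link, exec_to_vma(&eb->exec[i]) yields the vma. */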

/*
 * Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
#define GEN8_HIGH_ADDRESS_BIT 47
static inline u64 gen8_canonical_addr(u64 address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline u64 gen8_noncanonical_addr(u64 address)
{
	return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
}
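
/*
 * Worked example (sketch): an address with bit 47 set must have that bit
 * replicated through bits 63:48 to be canonical, so
 *   gen8_canonical_addr(0x0000800000000000ull)    == 0xffff800000000000ull
 * while gen8_noncanonical_addr() strips those copies off again:
 *   gen8_noncanonical_addr(0xffff800000000000ull) == 0x0000800000000000ull
 */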

static int eb_create(struct i915_execbuffer *eb)
{
	if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
		unsigned int size = 1 + ilog2(eb->buffer_count);

		/*
		 * Without a 1:1 association between relocation handles and
		 * the execobject[] index, we instead create a hashtable.
		 * We size it dynamically based on available memory, starting
		 * first with a 1:1 associative hash and scaling back until
		 * the allocation succeeds.
		 *
		 * Later on we use a positive lut_size to indicate we are
		 * using this hashtable, and a negative value to indicate a
		 * direct lookup.
		 */
		do {
			eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
					      GFP_TEMPORARY |
					      __GFP_NORETRY |
					      __GFP_NOWARN);
			if (eb->buckets)
				break;
		} while (--size);

		if (unlikely(!eb->buckets)) {
			eb->buckets = kzalloc(sizeof(struct hlist_head),
					      GFP_TEMPORARY);
			if (unlikely(!eb->buckets))
				return -ENOMEM;
		}

		eb->lut_size = size;
	} else {
		eb->lut_size = -eb->buffer_count;
	}

	return 0;
}

static bool
eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
		 const struct i915_vma *vma)
{
	if (!(entry->flags & __EXEC_OBJECT_HAS_PIN))
		return true;

	if (vma->node.size < entry->pad_to_size)
		return true;

	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
		return true;

	if (entry->flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	if (!(entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	return false;
}

static inline void
eb_pin_vma(struct i915_execbuffer *eb,
	   struct drm_i915_gem_exec_object2 *entry,
	   struct i915_vma *vma)
{
	u64 flags;

	if (vma->node.size)
		flags = vma->node.start;
	else
		flags = entry->offset & PIN_OFFSET_MASK;

	flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
	if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_GTT))
		flags |= PIN_GLOBAL;

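	/*
	 * Attempt to pin the vma exactly where it is (or where userspace
	 * asked for it); on failure the vma is simply left unpinned and the
	 * reservation slow path in eb_reserve() will place it instead.
	 */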
	if (unlikely(i915_vma_pin(vma, 0, 0, flags)))
		return;

	if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_FENCE)) {
		if (unlikely(i915_vma_get_fence(vma))) {
			i915_vma_unpin(vma);
			return;
		}

		if (i915_vma_pin_fence(vma))
			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	entry->flags |= __EXEC_OBJECT_HAS_PIN;
}

static inline void
__eb_unreserve_vma(struct i915_vma *vma,
		   const struct drm_i915_gem_exec_object2 *entry)
{
	GEM_BUG_ON(!(entry->flags & __EXEC_OBJECT_HAS_PIN));

	if (unlikely(entry->flags & __EXEC_OBJECT_HAS_FENCE))
		i915_vma_unpin_fence(vma);

	__i915_vma_unpin(vma);
}

static inline void
eb_unreserve_vma(struct i915_vma *vma,
		 struct drm_i915_gem_exec_object2 *entry)
{
	if (!(entry->flags & __EXEC_OBJECT_HAS_PIN))
		return;

	__eb_unreserve_vma(vma, entry);
	entry->flags &= ~__EXEC_OBJECT_RESERVED;
}

static int
eb_validate_vma(struct i915_execbuffer *eb,
		struct drm_i915_gem_exec_object2 *entry,
		struct i915_vma *vma)
{
	if (unlikely(entry->flags & eb->invalid_flags))
		return -EINVAL;

	if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
		return -EINVAL;

	/*
	 * Offset can be used as input (EXEC_OBJECT_PINNED), reject
	 * any non-page-aligned or non-canonical addresses.
	 */
	if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
		     entry->offset != gen8_canonical_addr(entry->offset & PAGE_MASK)))
		return -EINVAL;

	/* pad_to_size was once a reserved field, so sanitize it */
	if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
		if (unlikely(offset_in_page(entry->pad_to_size)))
			return -EINVAL;
	} else {
		entry->pad_to_size = 0;
	}

	if (unlikely(vma->exec_entry)) {
		DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n",
			  entry->handle, (int)(entry - eb->exec));
		return -EINVAL;
	}

	/*
	 * From drm_mm perspective address space is continuous,
	 * so from this point we're always using non-canonical
	 * form internally.
	 */
	entry->offset = gen8_noncanonical_addr(entry->offset);

	return 0;
}

static int
eb_add_vma(struct i915_execbuffer *eb,
	   struct drm_i915_gem_exec_object2 *entry,
	   struct i915_vma *vma)
{
	int err;

	GEM_BUG_ON(i915_vma_is_closed(vma));

	if (!(eb->args->flags & __EXEC_VALIDATED)) {
		err = eb_validate_vma(eb, entry, vma);
		if (unlikely(err))
			return err;
	}

	if (eb->lut_size >= 0) {
		vma->exec_handle = entry->handle;
		hlist_add_head(&vma->exec_node,
			       &eb->buckets[hash_32(entry->handle,
						    eb->lut_size)]);
	}

	if (entry->relocation_count)
		list_add_tail(&vma->reloc_link, &eb->relocs);

	if (!eb->reloc_cache.has_fence) {
		entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
	} else {
		if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
		     eb->reloc_cache.needs_unfenced) &&
		    i915_gem_object_is_tiled(vma->obj))
			entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
	}

	if (!(entry->flags & EXEC_OBJECT_PINNED))
		entry->flags |= eb->context_flags;

	/*
	 * Stash a pointer from the vma to execobj, so we can query its flags,
	 * size, alignment etc as provided by the user. Also we stash a pointer
	 * to the vma inside the execobj so that we can use a direct lookup
	 * to find the right target VMA when doing relocations.
	 */
	vma->exec_entry = entry;
	__exec_to_vma(entry) = (uintptr_t)vma;

	err = 0;
	eb_pin_vma(eb, entry, vma);
	if (eb_vma_misplaced(entry, vma)) {
		eb_unreserve_vma(vma, entry);

		list_add_tail(&vma->exec_link, &eb->unbound);
		if (drm_mm_node_allocated(&vma->node))
			err = i915_vma_unbind(vma);
	} else {
		if (entry->offset != vma->node.start) {
			entry->offset = vma->node.start | UPDATE;
			eb->args->flags |= __EXEC_HAS_RELOC;
		}
	}
	return err;
}

static inline int use_cpu_reloc(const struct reloc_cache *cache,
				const struct drm_i915_gem_object *obj)
{
	if (!i915_gem_object_has_struct_page(obj))
		return false;

	if (DBG_USE_CPU_RELOC)
		return DBG_USE_CPU_RELOC > 0;

	return (cache->has_llc ||
		obj->cache_dirty ||
		obj->cache_level != I915_CACHE_NONE);
}

static int eb_reserve_vma(const struct i915_execbuffer *eb,
			  struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	u64 flags;
	int err;

	flags = PIN_USER | PIN_NONBLOCK;
	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
		flags |= PIN_GLOBAL;

	/*
	 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
	 * limit address to the first 4GBs for unflagged objects.
	 */
	if (!(entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
		flags |= PIN_ZONE_4G;

	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
		flags |= PIN_MAPPABLE;

	if (entry->flags & EXEC_OBJECT_PINNED) {
		flags |= entry->offset | PIN_OFFSET_FIXED;
		flags &= ~PIN_NONBLOCK; /* force overlapping PINNED checks */
	} else if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS) {
		flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
	}

	err = i915_vma_pin(vma, entry->pad_to_size, entry->alignment, flags);
	if (err)
		return err;

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start | UPDATE;
		eb->args->flags |= __EXEC_HAS_RELOC;
	}

	entry->flags |= __EXEC_OBJECT_HAS_PIN;
	GEM_BUG_ON(eb_vma_misplaced(entry, vma));

	if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_FENCE)) {
		err = i915_vma_get_fence(vma);
		if (unlikely(err)) {
			i915_vma_unpin(vma);
			return err;
		}

		if (i915_vma_pin_fence(vma))
			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	return 0;
}

static int eb_reserve(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	struct list_head last;
	struct i915_vma *vma;
	unsigned int i, pass;
	int err;

	/*
	 * Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */

	pass = 0;
	err = 0;
	do {
		list_for_each_entry(vma, &eb->unbound, exec_link) {
			err = eb_reserve_vma(eb, vma);
			if (err)
				break;
		}
		if (err != -ENOSPC)
			return err;

		/* Resort *all* the objects into priority order */
		INIT_LIST_HEAD(&eb->unbound);
		INIT_LIST_HEAD(&last);
		for (i = 0; i < count; i++) {
			struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];

			if (entry->flags & EXEC_OBJECT_PINNED &&
			    entry->flags & __EXEC_OBJECT_HAS_PIN)
				continue;

			vma = exec_to_vma(entry);
			eb_unreserve_vma(vma, entry);

			if (entry->flags & EXEC_OBJECT_PINNED)
				list_add(&vma->exec_link, &eb->unbound);
			else if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
				list_add_tail(&vma->exec_link, &eb->unbound);
			else
				list_add_tail(&vma->exec_link, &last);
		}
		list_splice_tail(&last, &eb->unbound);

		switch (pass++) {
		case 0:
			break;

		case 1:
			/* Too fragmented, unbind everything and retry */
			err = i915_gem_evict_vm(eb->vm);
			if (err)
				return err;
			break;

		default:
			return -ENOSPC;
		}
	} while (1);
}

static inline struct hlist_head *
ht_head(const struct i915_gem_context_vma_lut *lut, u32 handle)
{
	return &lut->ht[hash_32(handle, lut->ht_bits)];
}

static inline bool
ht_needs_resize(const struct i915_gem_context_vma_lut *lut)
{
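	/* Grow when more than 3/4 full; shrink when below roughly 1/4 full. */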
	return (4*lut->ht_count > 3*lut->ht_size ||
		4*lut->ht_count + 1 < lut->ht_size);
}

static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
{
	if (eb->args->flags & I915_EXEC_BATCH_FIRST)
		return 0;
	else
		return eb->buffer_count - 1;
}

static int eb_select_context(struct i915_execbuffer *eb)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
	if (unlikely(IS_ERR(ctx)))
		return PTR_ERR(ctx);

	if (unlikely(i915_gem_context_is_banned(ctx))) {
		DRM_DEBUG("Context %u tried to submit while banned\n",
			  ctx->user_handle);
		return -EIO;
	}

	eb->ctx = i915_gem_context_get(ctx);
	eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base;

	eb->context_flags = 0;
	if (ctx->flags & CONTEXT_NO_ZEROMAP)
		eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return 0;
}

static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
#define INTERMEDIATE BIT(0)
	const unsigned int count = eb->buffer_count;
	struct i915_gem_context_vma_lut *lut = &eb->ctx->vma_lut;
	struct i915_vma *vma;
	struct idr *idr;
	unsigned int i;
	int slow_pass = -1;
	int err;

	INIT_LIST_HEAD(&eb->relocs);
	INIT_LIST_HEAD(&eb->unbound);

	if (unlikely(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS))
		flush_work(&lut->resize);
	GEM_BUG_ON(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS);

	for (i = 0; i < count; i++) {
		__exec_to_vma(&eb->exec[i]) = 0;

		hlist_for_each_entry(vma,
				     ht_head(lut, eb->exec[i].handle),
				     ctx_node) {
			if (vma->ctx_handle != eb->exec[i].handle)
				continue;

			err = eb_add_vma(eb, &eb->exec[i], vma);
			if (unlikely(err))
				return err;

			goto next_vma;
		}

		if (slow_pass < 0)
			slow_pass = i;
next_vma: ;
	}

	if (slow_pass < 0)
		goto out;

	spin_lock(&eb->file->table_lock);
	/*
	 * Grab a reference to the object and release the lock so we can lookup
	 * or create the VMA without using GFP_ATOMIC
	 */
	idr = &eb->file->object_idr;
	for (i = slow_pass; i < count; i++) {
		struct drm_i915_gem_object *obj;

		if (__exec_to_vma(&eb->exec[i]))
			continue;

		obj = to_intel_bo(idr_find(idr, eb->exec[i].handle));
		if (unlikely(!obj)) {
			spin_unlock(&eb->file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  eb->exec[i].handle, i);
			err = -ENOENT;
			goto err;
		}

		__exec_to_vma(&eb->exec[i]) = INTERMEDIATE | (uintptr_t)obj;
	}
	spin_unlock(&eb->file->table_lock);

	for (i = slow_pass; i < count; i++) {
		struct drm_i915_gem_object *obj;

		if (!(__exec_to_vma(&eb->exec[i]) & INTERMEDIATE))
			continue;

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		obj = u64_to_ptr(typeof(*obj),
				 __exec_to_vma(&eb->exec[i]) & ~INTERMEDIATE);
		vma = i915_vma_instance(obj, eb->vm, NULL);
		if (unlikely(IS_ERR(vma))) {
			DRM_DEBUG("Failed to lookup VMA\n");
			err = PTR_ERR(vma);
			goto err;
		}

		/* First come, first served */
		if (!vma->ctx) {
			vma->ctx = eb->ctx;
			vma->ctx_handle = eb->exec[i].handle;
			hlist_add_head(&vma->ctx_node,
				       ht_head(lut, eb->exec[i].handle));
			lut->ht_count++;
			lut->ht_size |= I915_CTX_RESIZE_IN_PROGRESS;
			if (i915_vma_is_ggtt(vma)) {
				GEM_BUG_ON(obj->vma_hashed);
				obj->vma_hashed = vma;
			}

			i915_vma_get(vma);
		}

		err = eb_add_vma(eb, &eb->exec[i], vma);
		if (unlikely(err))
			goto err;

		/* Only after we validated the user didn't use our bits */
		if (vma->ctx != eb->ctx) {
			i915_vma_get(vma);
			eb->exec[i].flags |= __EXEC_OBJECT_HAS_REF;
		}
	}

	if (lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS) {
		if (ht_needs_resize(lut))
			queue_work(system_highpri_wq, &lut->resize);
		else
			lut->ht_size &= ~I915_CTX_RESIZE_IN_PROGRESS;
	}

out:
	/* take note of the batch buffer before we might reorder the lists */
	i = eb_batch_index(eb);
	eb->batch = exec_to_vma(&eb->exec[i]);

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if (!(eb->exec[i].flags & EXEC_OBJECT_PINNED))
		eb->exec[i].flags |= __EXEC_OBJECT_NEEDS_BIAS;
	if (eb->reloc_cache.has_fence)
		eb->exec[i].flags |= EXEC_OBJECT_NEEDS_FENCE;

	eb->args->flags |= __EXEC_VALIDATED;
	return eb_reserve(eb);

err:
	for (i = slow_pass; i < count; i++) {
		if (__exec_to_vma(&eb->exec[i]) & INTERMEDIATE)
			__exec_to_vma(&eb->exec[i]) = 0;
	}
	lut->ht_size &= ~I915_CTX_RESIZE_IN_PROGRESS;
	return err;
#undef INTERMEDIATE
}

static struct i915_vma *
eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
{
	if (eb->lut_size < 0) {
		if (handle >= -eb->lut_size)
			return NULL;
		return exec_to_vma(&eb->exec[handle]);
	} else {
		struct hlist_head *head;
		struct i915_vma *vma;

		head = &eb->buckets[hash_32(handle, eb->lut_size)];
		hlist_for_each_entry(vma, head, exec_node) {
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void eb_release_vmas(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;

	for (i = 0; i < count; i++) {
		struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
		struct i915_vma *vma = exec_to_vma(entry);

		if (!vma)
			continue;

		GEM_BUG_ON(vma->exec_entry != entry);
		vma->exec_entry = NULL;

		if (entry->flags & __EXEC_OBJECT_HAS_PIN)
			__eb_unreserve_vma(vma, entry);

		if (entry->flags & __EXEC_OBJECT_HAS_REF)
			i915_vma_put(vma);

		entry->flags &=
			~(__EXEC_OBJECT_RESERVED | __EXEC_OBJECT_HAS_REF);
	}
}

static void eb_reset_vmas(const struct i915_execbuffer *eb)
{
	eb_release_vmas(eb);
	if (eb->lut_size >= 0)
		memset(eb->buckets, 0,
		       sizeof(struct hlist_head) << eb->lut_size);
}

static void eb_destroy(const struct i915_execbuffer *eb)
{
	if (eb->lut_size >= 0)
		kfree(eb->buckets);
}

static inline u64
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
		  const struct i915_vma *target)
{
	return gen8_canonical_addr((int)reloc->delta + target->node.start);
}

static void reloc_cache_init(struct reloc_cache *cache,
			     struct drm_i915_private *i915)
{
	cache->page = -1;
	cache->vaddr = 0;
	/* Must be a variable in the struct to allow GCC to unroll. */
	cache->has_llc = HAS_LLC(i915);
	cache->has_fence = INTEL_GEN(i915) < 4;
	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
	cache->node.allocated = false;
}

static inline void *unmask_page(unsigned long p)
{
	return (void *)(uintptr_t)(p & PAGE_MASK);
}

static inline unsigned int unmask_flags(unsigned long p)
{
	return p & ~PAGE_MASK;
}

#define KMAP 0x4 /* after CLFLUSH_FLAGS */

static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
{
	struct drm_i915_private *i915 =
		container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
	return &i915->ggtt;
}

static void reloc_cache_reset(struct reloc_cache *cache)
{
	void *vaddr;

	if (!cache->vaddr)
		return;

	vaddr = unmask_page(cache->vaddr);
	if (cache->vaddr & KMAP) {
		if (cache->vaddr & CLFLUSH_AFTER)
			mb();

		kunmap_atomic(vaddr);
		i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
	} else {
		wmb();
		io_mapping_unmap_atomic((void __iomem *)vaddr);
		if (cache->node.allocated) {
			struct i915_ggtt *ggtt = cache_to_ggtt(cache);

			ggtt->base.clear_range(&ggtt->base,
					       cache->node.start,
					       cache->node.size);
			drm_mm_remove_node(&cache->node);
		} else {
			i915_vma_unpin((struct i915_vma *)cache->node.mm);
		}
	}

	cache->vaddr = 0;
	cache->page = -1;
}

static void *reloc_kmap(struct drm_i915_gem_object *obj,
			struct reloc_cache *cache,
			unsigned long page)
{
	void *vaddr;

	if (cache->vaddr) {
		kunmap_atomic(unmask_page(cache->vaddr));
	} else {
		unsigned int flushes;
		int err;

		err = i915_gem_obj_prepare_shmem_write(obj, &flushes);
		if (err)
			return ERR_PTR(err);

		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);

		cache->vaddr = flushes | KMAP;
		cache->node.mm = (void *)obj;
		if (flushes)
			mb();
	}

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
	cache->page = page;

	return vaddr;
}

static void *reloc_iomap(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 unsigned long page)
{
	struct i915_ggtt *ggtt = cache_to_ggtt(cache);
	unsigned long offset;
	void *vaddr;

	if (cache->vaddr) {
		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
	} else {
		struct i915_vma *vma;
		int err;

		if (use_cpu_reloc(cache, obj))
			return NULL;

		err = i915_gem_object_set_to_gtt_domain(obj, true);
		if (err)
			return ERR_PTR(err);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE | PIN_NONBLOCK);
		if (IS_ERR(vma)) {
			memset(&cache->node, 0, sizeof(cache->node));
			err = drm_mm_insert_node_in_range
				(&ggtt->base.mm, &cache->node,
				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
				 0, ggtt->mappable_end,
				 DRM_MM_INSERT_LOW);
			if (err) /* no inactive aperture space, use cpu reloc */
				return NULL;
		} else {
			err = i915_vma_put_fence(vma);
			if (err) {
				i915_vma_unpin(vma);
				return ERR_PTR(err);
			}

			cache->node.start = vma->node.start;
			cache->node.mm = (void *)vma;
		}
	}

	offset = cache->node.start;
	if (cache->node.allocated) {
		wmb();
		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, page),
				       offset, I915_CACHE_NONE, 0);
	} else {
		offset += page << PAGE_SHIFT;
	}

	vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->mappable,
							 offset);
	cache->page = page;
	cache->vaddr = (unsigned long)vaddr;

	return vaddr;
}

static void *reloc_vaddr(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 unsigned long page)
{
	void *vaddr;

	if (cache->page == page) {
		vaddr = unmask_page(cache->vaddr);
	} else {
		vaddr = NULL;
		if ((cache->vaddr & KMAP) == 0)
			vaddr = reloc_iomap(obj, cache, page);
		if (!vaddr)
			vaddr = reloc_kmap(obj, cache, page);
	}

	return vaddr;
}

static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
{
	if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
		if (flushes & CLFLUSH_BEFORE) {
			clflushopt(addr);
			mb();
		}

		*addr = value;

		/*
		 * Writes to the same cacheline are serialised by the CPU
		 * (including clflush). On the write path, we only require
		 * that it hits memory in an orderly fashion and place
		 * mb barriers at the start and end of the relocation phase
		 * to ensure ordering of clflush wrt to the system.
		 */
		if (flushes & CLFLUSH_AFTER)
			clflushopt(addr);
	} else
		*addr = value;
}

static u64
relocate_entry(struct i915_vma *vma,
	       const struct drm_i915_gem_relocation_entry *reloc,
	       struct i915_execbuffer *eb,
	       const struct i915_vma *target)
{
	struct drm_i915_gem_object *obj = vma->obj;
	u64 offset = reloc->offset;
	u64 target_offset = relocation_target(reloc, target);
	bool wide = eb->reloc_cache.use_64bit_reloc;
	void *vaddr;

repeat:
	vaddr = reloc_vaddr(obj, &eb->reloc_cache, offset >> PAGE_SHIFT);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	clflush_write32(vaddr + offset_in_page(offset),
			lower_32_bits(target_offset),
			eb->reloc_cache.vaddr);

	if (wide) {
		offset += sizeof(u32);
		target_offset >>= 32;
		wide = false;
		goto repeat;
	}

	return target->node.start | UPDATE;
}

static u64
eb_relocate_entry(struct i915_execbuffer *eb,
		  struct i915_vma *vma,
		  const struct drm_i915_gem_relocation_entry *reloc)
{
	struct i915_vma *target;
	int err;

	/* we already hold a reference to all valid objects */
	target = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(!target))
		return -ENOENT;

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	if (reloc->write_domain) {
		target->exec_entry->flags |= EXEC_OBJECT_WRITE;

		/*
		 * Sandybridge PPGTT errata: We need a global gtt mapping
		 * for MI and pipe_control writes because the gpu doesn't
		 * properly redirect them through the ppgtt for non_secure
		 * batchbuffers.
		 */
		if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
		    IS_GEN6(eb->i915)) {
			err = i915_vma_bind(target, target->obj->cache_level,
					    PIN_GLOBAL);
			if (WARN_ONCE(err,
				      "Unexpected failure to bind target VMA!"))
				return err;
		}
	}

	/*
	 * If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (gen8_canonical_addr(target->node.start) == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		     vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "target %d offset %d size %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset,
			  (int)vma->size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "target %d offset %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset);
		return -EINVAL;
	}

	/*
	 * If we write into the object, we need to force the synchronisation
	 * barrier, either with an asynchronous clflush or if we executed the
	 * patching using the GPU (though that should be serialised by the
	 * timeline). To be completely sure, and since we are required to
	 * do relocations we are already stalling, disable the user's opt-out
	 * of our synchronisation.
	 */
	vma->exec_entry->flags &= ~EXEC_OBJECT_ASYNC;

	/* and update the user's relocation entry */
	return relocate_entry(vma, reloc, eb, target);
}

static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *urelocs;
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	unsigned int remain;

	urelocs = u64_to_user_ptr(entry->relocs_ptr);
	remain = entry->relocation_count;
	if (unlikely(remain > N_RELOC(ULONG_MAX)))
		return -EINVAL;

	/*
	 * We must check that the entire relocation array is safe
	 * to read. However, if the array is not writable the user loses
	 * the updated relocation values.
	 */
	if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs))))
		return -EFAULT;

	do {
		struct drm_i915_gem_relocation_entry *r = stack;
		unsigned int count =
			min_t(unsigned int, remain, ARRAY_SIZE(stack));
		unsigned int copied;

		/*
		 * This is the fast path and we cannot handle a pagefault
		 * whilst holding the struct mutex lest the user pass in the
		 * relocations contained within a mmaped bo. In such a case,
		 * the page fault handler would call i915_gem_fault() and we
		 * would try to acquire the struct mutex again. Obviously
		 * this is bad and so lockdep complains vehemently.
		 */
		pagefault_disable();
		copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
		pagefault_enable();
		if (unlikely(copied)) {
			remain = -EFAULT;
			goto out;
		}

		remain -= count;
		do {
			u64 offset = eb_relocate_entry(eb, vma, r);

			if (likely(offset == 0)) {
			} else if ((s64)offset < 0) {
				remain = (int)offset;
				goto out;
			} else {
				/*
				 * Note that reporting an error now
				 * leaves everything in an inconsistent
				 * state as we have *already* changed
				 * the relocation value inside the
				 * object. As we have not changed the
				 * reloc.presumed_offset or will not
				 * change the execobject.offset, on the
				 * next call we may not rewrite the value
				 * inside the object, leaving it
				 * dangling and causing a GPU hang. Unless
				 * userspace dynamically rebuilds the
				 * relocations on each execbuf rather than
				 * presume a static tree.
				 *
				 * We did previously check if the relocations
				 * were writable (access_ok), an error now
				 * would be a strange race with mprotect,
				 * having already demonstrated that we
				 * can read from this userspace address.
				 */
				offset = gen8_canonical_addr(offset & ~UPDATE);
				__put_user(offset,
					   &urelocs[r-stack].presumed_offset);
			}
		} while (r++, --count);
		urelocs += ARRAY_SIZE(stack);
	} while (remain);
out:
	reloc_cache_reset(&eb->reloc_cache);
	return remain;
}

static int
eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	struct drm_i915_gem_relocation_entry *relocs =
		u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
	unsigned int i;
	int err;

	for (i = 0; i < entry->relocation_count; i++) {
		u64 offset = eb_relocate_entry(eb, vma, &relocs[i]);

		if ((s64)offset < 0) {
			err = (int)offset;
			goto err;
		}
	}
	err = 0;
err:
	reloc_cache_reset(&eb->reloc_cache);
	return err;
}

static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
{
	const char __user *addr, *end;
	unsigned long size;
	char __maybe_unused c;

	size = entry->relocation_count;
	if (size == 0)
		return 0;

	if (size > N_RELOC(ULONG_MAX))
		return -EINVAL;

	addr = u64_to_user_ptr(entry->relocs_ptr);
	size *= sizeof(struct drm_i915_gem_relocation_entry);
	if (!access_ok(VERIFY_READ, addr, size))
		return -EFAULT;

	end = addr + size;
	for (; addr < end; addr += PAGE_SIZE) {
		int err = __get_user(c, addr);
		if (err)
			return err;
	}
	return __get_user(c, end - 1);
}

static int eb_copy_relocations(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		const unsigned int nreloc = eb->exec[i].relocation_count;
		struct drm_i915_gem_relocation_entry __user *urelocs;
		struct drm_i915_gem_relocation_entry *relocs;
		unsigned long size;
		unsigned long copied;

		if (nreloc == 0)
			continue;

		err = check_relocations(&eb->exec[i]);
		if (err)
			goto err;

		urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
		size = nreloc * sizeof(*relocs);

		relocs = kvmalloc_array(size, 1, GFP_TEMPORARY);
		if (!relocs) {
			err = -ENOMEM;
			goto err;
		}

		/* copy_from_user is limited to < 4GiB */
		copied = 0;
		do {
			unsigned int len =
				min_t(u64, BIT_ULL(31), size - copied);

			if (__copy_from_user((char *)relocs + copied,
					     (char *)urelocs + copied,
					     len)) {
				kvfree(relocs);
				err = -EFAULT;
				goto err;
			}

			copied += len;
		} while (copied < size);

		/*
		 * As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		user_access_begin();
		for (copied = 0; copied < nreloc; copied++)
			unsafe_put_user(-1,
					&urelocs[copied].presumed_offset,
					end_user);
end_user:
		user_access_end();

		eb->exec[i].relocs_ptr = (uintptr_t)relocs;
	}

	return 0;

err:
	while (i--) {
		struct drm_i915_gem_relocation_entry *relocs =
			u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
		if (eb->exec[i].relocation_count)
			kvfree(relocs);
	}
	return err;
}

static int eb_prefault_relocations(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;

	if (unlikely(i915.prefault_disable))
		return 0;

	for (i = 0; i < count; i++) {
		int err;

		err = check_relocations(&eb->exec[i]);
		if (err)
			return err;
	}

	return 0;
}

static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
{
	struct drm_device *dev = &eb->i915->drm;
	bool have_copy = false;
	struct i915_vma *vma;
	int err = 0;

repeat:
	if (signal_pending(current)) {
		err = -ERESTARTSYS;
		goto out;
	}

	/* We may process another execbuffer during the unlock... */
	eb_reset_vmas(eb);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * We take 3 passes through the slowpath.
	 *
	 * 1 - we try to just prefault all the user relocation entries and
	 * then attempt to reuse the atomic pagefault disabled fast path again.
	 *
	 * 2 - we copy the user entries to a local buffer here outside of the
	 * lock and allow ourselves to wait upon any rendering before
	 * performing the relocations
	 *
	 * 3 - we already have a local copy of the relocation entries, but
	 * were interrupted (EAGAIN) whilst waiting for the objects, try again.
	 */
	if (!err) {
		err = eb_prefault_relocations(eb);
	} else if (!have_copy) {
		err = eb_copy_relocations(eb);
		have_copy = err == 0;
	} else {
		cond_resched();
		err = 0;
	}
	if (err) {
		mutex_lock(&dev->struct_mutex);
		goto out;
	}

	/* A frequent cause of EAGAIN is currently unavailable client pages */
	flush_workqueue(eb->i915->mm.userptr_wq);

	err = i915_mutex_lock_interruptible(dev);
	if (err) {
		mutex_lock(&dev->struct_mutex);
		goto out;
	}

	/* reacquire the objects */
	err = eb_lookup_vmas(eb);
	if (err)
		goto err;

	list_for_each_entry(vma, &eb->relocs, reloc_link) {
		if (!have_copy) {
			pagefault_disable();
			err = eb_relocate_vma(eb, vma);
			pagefault_enable();
			if (err)
				goto repeat;
		} else {
			err = eb_relocate_vma_slow(eb, vma);
			if (err)
				goto err;
		}
	}

	/*
	 * Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	if (err == -EAGAIN)
		goto repeat;

out:
	if (have_copy) {
		const unsigned int count = eb->buffer_count;
		unsigned int i;

		for (i = 0; i < count; i++) {
			const struct drm_i915_gem_exec_object2 *entry =
				&eb->exec[i];
			struct drm_i915_gem_relocation_entry *relocs;

			if (!entry->relocation_count)
				continue;

			relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
			kvfree(relocs);
		}
	}

	return err ?: have_copy;
}

static int eb_relocate(struct i915_execbuffer *eb)
{
	if (eb_lookup_vmas(eb))
		goto slow;

	/* The objects are in their final locations, apply the relocations. */
	if (eb->args->flags & __EXEC_HAS_RELOC) {
		struct i915_vma *vma;

		list_for_each_entry(vma, &eb->relocs, reloc_link) {
			if (eb_relocate_vma(eb, vma))
				goto slow;
		}
	}

	return 0;

slow:
	return eb_relocate_slow(eb);
}

static void eb_export_fence(struct drm_i915_gem_object *obj,
			    struct drm_i915_gem_request *req,
			    unsigned int flags)
{
	struct reservation_object *resv = obj->resv;

	/*
	 * Ignore errors from failing to allocate the new fence, we can't
	 * handle an error right now. Worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
	reservation_object_lock(resv, NULL);
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &req->fence);
	else if (reservation_object_reserve_shared(resv) == 0)
		reservation_object_add_shared_fence(resv, &req->fence);
	reservation_object_unlock(resv);
}

static int eb_move_to_gpu(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		const struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
		struct i915_vma *vma = exec_to_vma(entry);
		struct drm_i915_gem_object *obj = vma->obj;

		if (entry->flags & EXEC_OBJECT_CAPTURE) {
			struct i915_gem_capture_list *capture;

			capture = kmalloc(sizeof(*capture), GFP_KERNEL);
			if (unlikely(!capture))
				return -ENOMEM;

			capture->next = eb->request->capture_list;
			capture->vma = vma;
			eb->request->capture_list = capture;
		}

		if (entry->flags & EXEC_OBJECT_ASYNC)
			goto skip_flushes;

		if (unlikely(obj->cache_dirty && !obj->cache_coherent))
			i915_gem_clflush_object(obj, 0);

		err = i915_gem_request_await_object
			(eb->request, obj, entry->flags & EXEC_OBJECT_WRITE);
		if (err)
			return err;

skip_flushes:
		i915_vma_move_to_active(vma, eb->request, entry->flags);
		__eb_unreserve_vma(vma, entry);
		vma->exec_entry = NULL;
	}

	for (i = 0; i < count; i++) {
		const struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
		struct i915_vma *vma = exec_to_vma(entry);

		eb_export_fence(vma->obj, eb->request, entry->flags);
		if (unlikely(entry->flags & __EXEC_OBJECT_HAS_REF))
			i915_vma_put(vma);
	}
	eb->exec = NULL;

	/* Unconditionally flush any chipset caches (for streaming writes). */
	i915_gem_chipset_flush(eb->i915);

	/* Unconditionally invalidate GPU caches and TLBs. */
	return eb->engine->emit_flush(eb->request, EMIT_INVALIDATE);
}

static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
		return false;

	/* Kernel clipping was a DRI1 misfeature */
	if (exec->num_cliprects || exec->cliprects_ptr)
		return false;

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return false;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return false;

	return true;
}

void i915_vma_move_to_active(struct i915_vma *vma,
			     struct drm_i915_gem_request *req,
			     unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	const unsigned int idx = req->engine->id;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	/*
	 * Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we
	 * add the active reference first and queue for it to be dropped
	 * *last*.
	 */
	if (!i915_vma_is_active(vma))
		obj->active_count++;
	i915_vma_set_active(vma, idx);
	i915_gem_active_set(&vma->last_read[idx], req);
	list_move_tail(&vma->vm_link, &vma->vm->active_list);

	obj->base.write_domain = 0;
	if (flags & EXEC_OBJECT_WRITE) {
		obj->base.write_domain = I915_GEM_DOMAIN_RENDER;

		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
			i915_gem_active_set(&obj->frontbuffer_write, req);

		obj->base.read_domains = 0;
	}
	obj->base.read_domains |= I915_GEM_GPU_DOMAINS;

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		i915_gem_active_set(&vma->last_fence, req);
}

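/*
 * Reset the gen7 streamout write offsets to zero before running the
 * batch; only the gen7 render engine supports this.
 */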
static int i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
{
	u32 *cs;
	int i;

	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	cs = intel_ring_begin(req, 4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(4);
	for (i = 0; i < 4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
		*cs++ = 0;
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

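/*
 * Copy the batch through the command parser into a shadow buffer taken
 * from the engine's batch pool. Returns the pinned shadow vma, NULL if
 * the parser hit an unhandled chained batch, or an ERR_PTR on failure.
 */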
static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
{
	struct drm_i915_gem_object *shadow_batch_obj;
	struct i915_vma *vma;
	int err;

	shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
						   PAGE_ALIGN(eb->batch_len));
	if (IS_ERR(shadow_batch_obj))
		return ERR_CAST(shadow_batch_obj);

	err = intel_engine_cmd_parser(eb->engine,
				      eb->batch->obj,
				      shadow_batch_obj,
				      eb->batch_start_offset,
				      eb->batch_len,
				      is_master);
	if (err) {
		if (err == -EACCES) /* unhandled chained batch */
			vma = NULL;
		else
			vma = ERR_PTR(err);
		goto out;
	}

	vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		goto out;

	vma->exec_entry =
		memset(&eb->exec[eb->buffer_count++],
		       0, sizeof(*vma->exec_entry));
	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
	__exec_to_vma(vma->exec_entry) = (uintptr_t)i915_vma_get(vma);

out:
	i915_gem_object_unpin_pages(shadow_batch_obj);
	return vma;
}

static void
add_to_client(struct drm_i915_gem_request *req, struct drm_file *file)
{
	req->file_priv = file->driver_priv;
	list_add_tail(&req->client_link, &req->file_priv->mm.request_list);
}

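/*
 * Emit the commands for this execbuf: flush the objects to the GPU,
 * switch to the target context, apply an optional SOL reset, and then
 * start the batch buffer.
 */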
static int eb_submit(struct i915_execbuffer *eb)
{
	int err;

	err = eb_move_to_gpu(eb);
	if (err)
		return err;

	err = i915_switch_context(eb->request);
	if (err)
		return err;

	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
		err = i915_reset_gen7_sol_offsets(eb->request);
		if (err)
			return err;
	}

	err = eb->engine->emit_bb_start(eb->request,
					eb->batch->node.start +
					eb->batch_start_offset,
					eb->batch_len,
					eb->batch_flags);
	if (err)
		return err;

	return 0;
}

/**
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The engine index is returned.
 */
static unsigned int
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
			 struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_engine < 0)
		file_priv->bsd_engine = atomic_fetch_xor(1,
			 &dev_priv->mm.bsd_engine_dispatch_index);

	return file_priv->bsd_engine;
}

#define I915_USER_RINGS (4)

static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
	[I915_EXEC_DEFAULT]	= RCS,
	[I915_EXEC_RENDER]	= RCS,
	[I915_EXEC_BLT]		= BCS,
	[I915_EXEC_BSD]		= VCS,
	[I915_EXEC_VEBOX]	= VECS
};

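/*
 * Map the user's ring selection (and BSD dispatch flags) onto an
 * engine, picking a BSD engine automatically when the user expressed
 * no preference.
 */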
static struct intel_engine_cs *
eb_select_engine(struct drm_i915_private *dev_priv,
		 struct drm_file *file,
		 struct drm_i915_gem_execbuffer2 *args)
{
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
	struct intel_engine_cs *engine;

	if (user_ring_id > I915_USER_RINGS) {
		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
		return NULL;
	}

	if ((user_ring_id != I915_EXEC_BSD) &&
	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
		DRM_DEBUG("execbuf with non bsd ring but with invalid "
			  "bsd dispatch flags: %d\n", (int)(args->flags));
		return NULL;
	}

	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
			bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
			bsd_idx--;
		} else {
			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
				  bsd_idx);
			return NULL;
		}

		engine = dev_priv->engine[_VCS(bsd_idx)];
	} else {
		engine = dev_priv->engine[user_ring_map[user_ring_id]];
	}

	if (!engine) {
		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
		return NULL;
	}

	return engine;
}

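/*
 * The heart of execbuf: validate the arguments, reserve and relocate
 * the objects, run the batch through the command parser if needed,
 * allocate and submit a request, and report any fences back to
 * userspace.
 */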
static int
i915_gem_do_execbuffer(struct drm_device *dev,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	struct i915_execbuffer eb;
	struct dma_fence *in_fence = NULL;
	struct sync_file *out_fence = NULL;
	int out_fence_fd = -1;
	int err;

	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);

	eb.i915 = to_i915(dev);
	eb.file = file;
	eb.args = args;
	if (!(args->flags & I915_EXEC_NO_RELOC))
		args->flags |= __EXEC_HAS_RELOC;
	eb.exec = exec;
	eb.ctx = NULL;
	eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	if (USES_FULL_PPGTT(eb.i915))
		eb.invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
	reloc_cache_init(&eb.reloc_cache, eb.i915);

	eb.buffer_count = args->buffer_count;
	eb.batch_start_offset = args->batch_start_offset;
	eb.batch_len = args->batch_len;

	eb.batch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		eb.batch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		eb.batch_flags |= I915_DISPATCH_PINNED;

	eb.engine = eb_select_engine(eb.i915, file, args);
	if (!eb.engine)
		return -EINVAL;

	if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
		if (!HAS_RESOURCE_STREAMER(eb.i915)) {
			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
			return -EINVAL;
		}
		if (eb.engine->id != RCS) {
			DRM_DEBUG("RS is not available on %s\n",
				  eb.engine->name);
			return -EINVAL;
		}

		eb.batch_flags |= I915_DISPATCH_RS;
	}

	if (args->flags & I915_EXEC_FENCE_IN) {
		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!in_fence)
			return -EINVAL;
	}

	if (args->flags & I915_EXEC_FENCE_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			err = out_fence_fd;
			goto err_in_fence;
		}
	}

	err = eb_create(&eb);
	if (err)
		goto err_out_fence;

	/*
	 * Take a local wakeref for preparing to dispatch the execbuf as
	 * we expect to access the hardware fairly frequently in the
	 * process. Upon first dispatch, we acquire another prolonged
	 * wakeref that we hold until the GPU has been idle for at least
	 * 100ms.
	 */
	intel_runtime_pm_get(eb.i915);
	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto err_rpm;

	err = eb_select_context(&eb);
	if (unlikely(err))
		goto err_unlock;

	err = eb_relocate(&eb);
	if (err)
		/*
		 * If the user expects the execobject.offset and
		 * reloc.presumed_offset to be an exact match,
		 * as for using NO_RELOC, then we cannot update
		 * the execobject.offset until we have completed
		 * relocation.
		 */
		args->flags &= ~__EXEC_HAS_RELOC;
	if (err < 0)
		goto err_vma;

	if (unlikely(eb.batch->exec_entry->flags & EXEC_OBJECT_WRITE)) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		err = -EINVAL;
		goto err_vma;
	}
	if (eb.batch_start_offset > eb.batch->size ||
	    eb.batch_len > eb.batch->size - eb.batch_start_offset) {
		DRM_DEBUG("Attempting to use out-of-bounds batch\n");
		err = -EINVAL;
		goto err_vma;
	}

	if (eb.engine->needs_cmd_parser && eb.batch_len) {
		struct i915_vma *vma;

		vma = eb_parse(&eb, drm_is_current_master(file));
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_vma;
		}

		if (vma) {
			/*
			 * Batch parsed and accepted:
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
			 * bit from MI_BATCH_BUFFER_START commands issued in
			 * the dispatch_execbuffer implementations. We
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
			eb.batch_flags |= I915_DISPATCH_SECURE;
			eb.batch_start_offset = 0;
			eb.batch = vma;
		}
	}

	if (eb.batch_len == 0)
		eb.batch_len = eb.batch->size - eb.batch_start_offset;

	/*
	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again.
	 */
	if (eb.batch_flags & I915_DISPATCH_SECURE) {
		struct i915_vma *vma;

		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_vma;
		}

		eb.batch = vma;
	}

	/* Allocate a request for this batch buffer nice and early. */
	eb.request = i915_gem_request_alloc(eb.engine, eb.ctx);
	if (IS_ERR(eb.request)) {
		err = PTR_ERR(eb.request);
		goto err_batch_unpin;
	}

	if (in_fence) {
		err = i915_gem_request_await_dma_fence(eb.request, in_fence);
		if (err < 0)
			goto err_request;
	}

	if (out_fence_fd != -1) {
		out_fence = sync_file_create(&eb.request->fence);
		if (!out_fence) {
			err = -ENOMEM;
			goto err_request;
		}
	}

	/*
	 * Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	eb.request->batch = eb.batch;

	trace_i915_gem_request_queue(eb.request, eb.batch_flags);
	err = eb_submit(&eb);
err_request:
	__i915_add_request(eb.request, err == 0);
	add_to_client(eb.request, file);

	if (out_fence) {
		if (err == 0) {
			fd_install(out_fence_fd, out_fence->file);
			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
			args->rsvd2 |= (u64)out_fence_fd << 32;
			out_fence_fd = -1;
		} else {
			fput(out_fence->file);
		}
	}

err_batch_unpin:
	if (eb.batch_flags & I915_DISPATCH_SECURE)
		i915_vma_unpin(eb.batch);
err_vma:
	if (eb.exec)
		eb_release_vmas(&eb);
	i915_gem_context_put(eb.ctx);
err_unlock:
	mutex_unlock(&dev->struct_mutex);
err_rpm:
	intel_runtime_pm_put(eb.i915);
	eb_destroy(&eb);
err_out_fence:
	if (out_fence_fd != -1)
		put_unused_fd(out_fence_fd);
err_in_fence:
	dma_fence_put(in_fence);
	return err;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	const size_t sz = sizeof(struct drm_i915_gem_exec_object2);
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	unsigned int i;
	int err;

	if (args->buffer_count < 1 || args->buffer_count > SIZE_MAX / sz - 1) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	if (!i915_gem_check_execbuffer(&exec2))
		return -EINVAL;

	/* Copy in the exec list from userland */
	exec_list = kvmalloc_array(args->buffer_count, sizeof(*exec_list),
				   __GFP_NOWARN | GFP_TEMPORARY);
	exec2_list = kvmalloc_array(args->buffer_count + 1, sz,
				    __GFP_NOWARN | GFP_TEMPORARY);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -ENOMEM;
	}
	err = copy_from_user(exec_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (err) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, err);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_GEN(to_i915(dev)) < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list);
	if (exec2.flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
			exec2_list[i].offset &= PIN_OFFSET_MASK;
			if (__copy_to_user(&user_exec_list[i].offset,
					   &exec2_list[i].offset,
					   sizeof(user_exec_list[i].offset)))
				break;
		}
	}

	kvfree(exec_list);
	kvfree(exec2_list);
	return err;
}

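/*
 * Main execbuffer2 entry point: copy in the exec object list (with a
 * spare slot for the command parser), execute the batch and write the
 * updated buffer offsets back to the user's list.
 */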
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	const size_t sz = sizeof(struct drm_i915_gem_exec_object2);
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list;
	int err;

	if (args->buffer_count < 1 || args->buffer_count > SIZE_MAX / sz - 1) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	/* Allocate an extra slot for use by the command parser */
	exec2_list = kvmalloc_array(args->buffer_count + 1, sz,
				    __GFP_NOWARN | GFP_TEMPORARY);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	if (copy_from_user(exec2_list,
			   u64_to_user_ptr(args->buffers_ptr),
			   sizeof(*exec2_list) * args->buffer_count)) {
		DRM_DEBUG("copy %d exec entries failed\n", args->buffer_count);
		kvfree(exec2_list);
		return -EFAULT;
	}

	err = i915_gem_do_execbuffer(dev, file, args, exec2_list);

	/*
	 * Now that we have begun execution of the batchbuffer, we ignore
	 * any new error after this point. Also given that we have already
	 * updated the associated relocations, we try to write out the current
	 * object locations irrespective of any error.
	 */
	if (args->flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);
		unsigned int i;

		/* Copy the new buffer offsets back to the user's exec list. */
		user_access_begin();
		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
			unsafe_put_user(exec2_list[i].offset,
					&user_exec_list[i].offset,
					end_user);
		}
end_user:
		user_access_end();
	}

	args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
	kvfree(exec2_list);
	return err;
}