/* i915_gem_execbuffer.c */
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008,2010 Intel Corporation
 */

#include <linux/intel-iommu.h>
#include <linux/dma-resv.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_syncobj.h>

#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_ring.h"

#include "pxp/intel_pxp.h"

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_ioctls.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#include "i915_vma_snapshot.h"

struct eb_vma {
	struct i915_vma *vma;
	unsigned int flags;

	/** This vma's place in the execbuf reservation list */
	struct drm_i915_gem_exec_object2 *exec;
	struct list_head bind_link;
	struct list_head reloc_link;

	struct hlist_node node;
	u32 handle;
};

enum {
	FORCE_CPU_RELOC = 1,
	FORCE_GTT_RELOC,
	FORCE_GPU_RELOC,
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
};

/* __EXEC_OBJECT_NO_RESERVE is BIT(31), defined in i915_vma.h */
#define __EXEC_OBJECT_HAS_PIN		BIT(30)
#define __EXEC_OBJECT_HAS_FENCE		BIT(29)
#define __EXEC_OBJECT_USERPTR_INIT	BIT(28)
#define __EXEC_OBJECT_NEEDS_MAP		BIT(27)
#define __EXEC_OBJECT_NEEDS_BIAS	BIT(26)
#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 26) /* all of the above + */
#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)

#define __EXEC_HAS_RELOC	BIT(31)
#define __EXEC_ENGINE_PINNED	BIT(30)
#define __EXEC_USERPTR_USED	BIT(29)
#define __EXEC_INTERNAL_FLAGS	(~0u << 29)
#define UPDATE			PIN_OFFSET_FIXED

#define BATCH_OFFSET_BIAS (256*1024)

#define __I915_EXEC_ILLEGAL_FLAGS \
	(__I915_EXEC_UNKNOWN_FLAGS | \
	 I915_EXEC_CONSTANTS_MASK  | \
	 I915_EXEC_RESOURCE_STREAMER)

/* Catch emission of unexpected errors for CI! */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#undef EINVAL
#define EINVAL ({ \
	DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
	22; \
})
#endif

/**
 * DOC: User command execution
 *
 * Userspace submits commands to be executed on the GPU as an instruction
 * stream within a GEM object we call a batchbuffer. These instructions may
 * refer to other GEM objects containing auxiliary state such as kernels,
 * samplers, render targets and even secondary batchbuffers. Userspace does
 * not know where in the GPU memory these objects reside and so before the
 * batchbuffer is passed to the GPU for execution, those addresses in the
 * batchbuffer and auxiliary objects are updated. This is known as relocation,
 * or patching. To try and avoid having to relocate each object on the next
 * execution, userspace is told the location of those objects in this pass,
 * but this remains just a hint as the kernel may choose a new location for
 * any object in the future.
 *
 * At the level of talking to the hardware, submitting a batchbuffer for the
 * GPU to execute is to add content to a buffer from which the HW
 * command streamer is reading.
 *
 * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e.
 *    Execlists, this command is not placed on the same buffer as the
 *    remaining items.
 *
 * 2. Add a command to invalidate caches to the buffer.
 *
 * 3. Add a batchbuffer start command to the buffer; the start command is
 *    essentially a token together with the GPU address of the batchbuffer
 *    to be executed.
 *
 * 4. Add a pipeline flush to the buffer.
 *
 * 5. Add a memory write command to the buffer to record when the GPU
 *    is done executing the batchbuffer. The memory write writes the
 *    global sequence number of the request, ``i915_request::global_seqno``;
 *    the i915 driver uses the current value in the register to determine
 *    if the GPU has completed the batchbuffer.
 *
 * 6. Add a user interrupt command to the buffer. This command instructs
 *    the GPU to issue an interrupt when the command, pipeline flush and
 *    memory write are completed.
 *
 * 7. Inform the hardware of the additional commands added to the buffer
 *    (by updating the tail pointer).
 *
 * Processing an execbuf ioctl is conceptually split up into a few phases.
 *
 * 1. Validation - Ensure all the pointers, handles and flags are valid.
 * 2. Reservation - Assign GPU address space for every object
 * 3. Relocation - Update any addresses to point to the final locations
 * 4. Serialisation - Order the request with respect to its dependencies
 * 5. Construction - Construct a request to execute the batchbuffer
 * 6. Submission (at some point in the future execution)
 *
 * Reserving resources for the execbuf is the most complicated phase. We
 * neither want to have to migrate the object in the address space, nor do
 * we want to have to update any relocations pointing to this object. Ideally,
 * we want to leave the object where it is and for all the existing relocations
 * to match. If the object is given a new address, or if userspace thinks the
 * object is elsewhere, we have to parse all the relocation entries and update
 * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
 * all the target addresses in all of its objects match the value in the
 * relocation entries and that they all match the presumed offsets given by the
 * list of execbuffer objects. Using this knowledge, we know that if we haven't
 * moved any buffers, all the relocation entries are valid and we can skip
 * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
 * hang.) The requirements for using I915_EXEC_NO_RELOC are:
 *
 *      The addresses written in the objects must match the corresponding
 *      reloc.presumed_offset which in turn must match the corresponding
 *      execobject.offset.
 *
 *      Any render targets written to in the batch must be flagged with
 *      EXEC_OBJECT_WRITE.
 *
 *      To avoid stalling, execobject.offset should match the current
 *      address of that object within the active context.
 *
 * The reservation is done in multiple phases. First we try and keep any
 * object already bound in its current location - so long as it meets the
 * constraints imposed by the new execbuffer. Any object left unbound after the
 * first pass is then fitted into any available idle space. If an object does
 * not fit, all objects are removed from the reservation and the process rerun
 * after sorting the objects into a priority order (more difficult to fit
 * objects are tried first). Failing that, the entire VM is cleared and we try
 * to fit the execbuf one last time before concluding that it simply will not
 * fit.
 *
 * A small complication to all of this is that we allow userspace not only to
 * specify an alignment and a size for the object in the address space, but
 * we also allow userspace to specify the exact offset. These objects are
 * simpler to place (the location is known a priori); all we have to do is make
 * sure the space is available.
 *
 * Once all the objects are in place, patching up the buried pointers to point
 * to the final locations is a fairly simple job of walking over the relocation
 * entry arrays, looking up the right address and rewriting the value into
 * the object. Simple! ... The relocation entries are stored in user memory
 * and so to access them we have to copy them into a local buffer. That copy
 * has to avoid taking any pagefaults as they may lead back to a GEM object
 * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
 * the relocation into multiple passes. First we try to do everything within an
 * atomic context (avoid the pagefaults) which requires that we never wait. If
 * we detect that we may wait, or if we need to fault, then we have to fallback
 * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
 * bells yet?) Dropping the mutex means that we lose all the state we have
 * built up so far for the execbuf and we must reset any global data. However,
 * we do leave the objects pinned in their final locations - which is a
 * potential issue for concurrent execbufs. Once we have left the mutex, we can
 * allocate and copy all the relocation entries into a large array at our
 * leisure, reacquire the mutex, reclaim all the objects and other state and
 * then proceed to update any incorrect addresses with the objects.
 *
 * As we process the relocation entries, we maintain a record of whether the
 * object is being written to. Using NORELOC, we expect userspace to provide
 * this information instead. We also check whether we can skip the relocation
 * by comparing the expected value inside the relocation entry with the target's
 * final address. If they differ, we have to map the current object and rewrite
 * the 4 or 8 byte pointer within.
 *
 * Serialising an execbuf is quite simple according to the rules of the GEM
 * ABI. Execution within each context is ordered by the order of submission.
 * Writes to any GEM object are in order of submission and are exclusive. Reads
 * from a GEM object are unordered with respect to other reads, but ordered by
 * writes. A write submitted after a read cannot occur before the read, and
 * similarly any read submitted after a write cannot occur before the write.
 * Writes are ordered between engines such that only one write occurs at any
 * time (completing any reads beforehand) - using semaphores where available
 * and CPU serialisation otherwise. Other GEM accesses obey the same rules: any
 * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
 * reads before starting, and any read (either using set-domain or pread) must
 * flush all GPU writes before starting. (Note we only employ a barrier before,
 * we currently rely on userspace not concurrently starting a new execution
 * whilst reading or writing to an object. This may be an advantage or not
 * depending on how much you trust userspace not to shoot themselves in the
 * foot.) Serialisation may just result in the request being inserted into
 * a DAG awaiting its turn, but most simple is to wait on the CPU until
 * all dependencies are resolved.
 *
 * After all of that, it is just a matter of closing the request and handing it to
 * the hardware (well, leaving it in a queue to be executed). However, we also
 * offer the ability for batchbuffers to be run with elevated privileges so
 * that they access otherwise hidden registers. (Used to adjust L3 cache etc.)
 * Before any batch is given extra privileges we first must check that it
 * contains no nefarious instructions, we check that each instruction is from
 * our whitelist and all registers are also from an allowed list. We first
 * copy the user's batchbuffer to a shadow (so that the user doesn't have
 * access to it, either by the CPU or GPU as we scan it) and then parse each
 * instruction. If everything is ok, we set a flag telling the hardware to run
 * the batchbuffer in trusted mode, otherwise the ioctl is rejected.
 */
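
/*
 * Informative sketch (not part of the driver): a minimal userspace
 * submission following the rules above, using the uapi structures from
 * include/uapi/drm/i915_drm.h. The fd and the two object handles are
 * hypothetical, and error handling is elided.
 *
 *	struct drm_i915_gem_exec_object2 obj[2] = {};
 *	struct drm_i915_gem_execbuffer2 execbuf = {};
 *
 *	obj[0].handle = state_handle;		// auxiliary state object
 *	obj[0].offset = presumed_state_offset;	// hint from a prior execbuf
 *	obj[1].handle = batch_handle;		// batch is last by default
 *	obj[1].offset = presumed_batch_offset;
 *
 *	execbuf.buffers_ptr = (uintptr_t)obj;
 *	execbuf.buffer_count = 2;
 *	execbuf.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 */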

struct eb_fence {
	struct drm_syncobj *syncobj; /* Use with ptr_mask_bits() */
	struct dma_fence *dma_fence;
	u64 value;
	struct dma_fence_chain *chain_fence;
};

struct i915_execbuffer {
	struct drm_i915_private *i915; /** i915 backpointer */
	struct drm_file *file; /** per-file lookup tables and limits */
	struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
	struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
	struct eb_vma *vma;

	struct intel_gt *gt; /* gt for the execbuf */
	struct intel_context *context; /* logical state for the request */
	struct i915_gem_context *gem_context; /** caller's context */

	/** our requests to build */
	struct i915_request *requests[MAX_ENGINE_INSTANCE + 1];
	/** identity of the batch obj/vma */
	struct eb_vma *batches[MAX_ENGINE_INSTANCE + 1];
	struct i915_vma *trampoline; /** trampoline used for chaining */

	/** used for excl fence in dma_resv objects when > 1 BB submitted */
	struct dma_fence *composite_fence;

	/** actual size of execobj[] as we may extend it for the cmdparser */
	unsigned int buffer_count;

	/* number of batches in execbuf IOCTL */
	unsigned int num_batches;

	/** list of vma not yet bound during reservation phase */
	struct list_head unbound;

	/** list of vma that have execobj.relocation_count */
	struct list_head relocs;

	struct i915_gem_ww_ctx ww;

	/**
	 * Track the most recently used object for relocations, as we
	 * frequently have to perform multiple relocations within the same
	 * obj/page
	 */
	struct reloc_cache {
		struct drm_mm_node node; /** temporary GTT binding */
		unsigned long vaddr; /** Current kmap address */
		unsigned long page; /** Currently mapped page index */
		unsigned int graphics_ver; /** Cached value of GRAPHICS_VER */
		bool use_64bit_reloc : 1;
		bool has_llc : 1;
		bool has_fence : 1;
		bool needs_unfenced : 1;
	} reloc_cache;

	u64 invalid_flags; /** Set of execobj.flags that are invalid */

	/** Length of batch within object */
	u64 batch_len[MAX_ENGINE_INSTANCE + 1];
	u32 batch_start_offset; /** Location within object of batch */
	u32 batch_flags; /** Flags composed for emit_bb_start() */
	struct intel_gt_buffer_pool_node *batch_pool; /** pool node for batch buffer */

	/**
	 * Indicate either the size of the hashtable used to resolve
	 * relocation handles, or if negative that we are using a direct
	 * index into the execobj[].
	 */
	int lut_size;
	struct hlist_head *buckets; /** ht for relocation handles */

	struct eb_fence *fences;
	unsigned long num_fences;
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	struct i915_capture_list *capture_lists[MAX_ENGINE_INSTANCE + 1];
#endif
};

static int eb_parse(struct i915_execbuffer *eb);
static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle);
static void eb_unpin_engine(struct i915_execbuffer *eb);
static void eb_capture_release(struct i915_execbuffer *eb);

static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
	return intel_engine_requires_cmd_parser(eb->context->engine) ||
		(intel_engine_using_cmd_parser(eb->context->engine) &&
		 eb->args->batch_len);
}

static int eb_create(struct i915_execbuffer *eb)
{
	if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
		unsigned int size = 1 + ilog2(eb->buffer_count);

		/*
		 * Without a 1:1 association between relocation handles and
		 * the execobject[] index, we instead create a hashtable.
		 * We size it dynamically based on available memory, starting
		 * first with 1:1 associative hash and scaling back until
		 * the allocation succeeds.
		 *
		 * Later on we use a positive lut_size to indicate we are
		 * using this hashtable, and a negative value to indicate a
		 * direct lookup.
		 */
		do {
			gfp_t flags;

			/* While we can still reduce the allocation size, don't
			 * raise a warning and allow the allocation to fail.
			 * On the last pass though, we want to try as hard
			 * as possible to perform the allocation and warn
			 * if it fails.
			 */
			flags = GFP_KERNEL;
			if (size > 1)
				flags |= __GFP_NORETRY | __GFP_NOWARN;

			eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
					      flags);
			if (eb->buckets)
				break;
		} while (--size);

		if (unlikely(!size))
			return -ENOMEM;

		eb->lut_size = size;
	} else {
		eb->lut_size = -eb->buffer_count;
	}

	return 0;
}
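
/*
 * Worked example for the sizing above (informative): with
 * buffer_count == 100, size starts at 1 + ilog2(100) == 7, i.e. a
 * 128-bucket table; each failed allocation halves the table until a
 * final 2-bucket attempt (size == 1), after which we return -ENOMEM.
 */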

static bool
eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
		 const struct i915_vma *vma,
		 unsigned int flags)
{
	if (vma->node.size < entry->pad_to_size)
		return true;

	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
		return true;

	if (flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
	    (vma->node.start + vma->node.size + 4095) >> 32)
		return true;

	if (flags & __EXEC_OBJECT_NEEDS_MAP &&
	    !i915_vma_is_map_and_fenceable(vma))
		return true;

	return false;
}

static u64 eb_pin_flags(const struct drm_i915_gem_exec_object2 *entry,
			unsigned int exec_flags)
{
	u64 pin_flags = 0;

	if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
		pin_flags |= PIN_GLOBAL;

	/*
	 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
	 * limit address to the first 4GBs for unflagged objects.
	 */
	if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
		pin_flags |= PIN_ZONE_4G;

	if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
		pin_flags |= PIN_MAPPABLE;

	if (exec_flags & EXEC_OBJECT_PINNED)
		pin_flags |= entry->offset | PIN_OFFSET_FIXED;
	else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
		pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;

	return pin_flags;
}
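
/*
 * Design note (informative): GTT offsets are page aligned, so the low
 * bits of the u64 returned here are free to carry the PIN_* flag bits;
 * e.g. a pinned execobj at 0x100000 yields 0x100000 | PIN_OFFSET_FIXED
 * in a single value.
 */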

static inline int
eb_pin_vma(struct i915_execbuffer *eb,
	   const struct drm_i915_gem_exec_object2 *entry,
	   struct eb_vma *ev)
{
	struct i915_vma *vma = ev->vma;
	u64 pin_flags;
	int err;

	if (vma->node.size)
		pin_flags = vma->node.start;
	else
		pin_flags = entry->offset & PIN_OFFSET_MASK;

	pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT))
		pin_flags |= PIN_GLOBAL;

	/* Attempt to reuse the current location if available */
	err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, pin_flags);
	if (err == -EDEADLK)
		return err;

	if (unlikely(err)) {
		if (entry->flags & EXEC_OBJECT_PINNED)
			return err;

		/* Failing that pick any _free_ space if suitable */
		err = i915_vma_pin_ww(vma, &eb->ww,
					     entry->pad_to_size,
					     entry->alignment,
					     eb_pin_flags(entry, ev->flags) |
					     PIN_USER | PIN_NOEVICT);
		if (unlikely(err))
			return err;
	}

	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
		err = i915_vma_pin_fence(vma);
		if (unlikely(err)) {
			i915_vma_unpin(vma);
			return err;
		}

		if (vma->fence)
			ev->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	ev->flags |= __EXEC_OBJECT_HAS_PIN;
	if (eb_vma_misplaced(entry, vma, ev->flags))
		return -EBADSLT;

	return 0;
}

static inline void
eb_unreserve_vma(struct eb_vma *ev)
{
	if (!(ev->flags & __EXEC_OBJECT_HAS_PIN))
		return;

	if (unlikely(ev->flags & __EXEC_OBJECT_HAS_FENCE))
		__i915_vma_unpin_fence(ev->vma);

	__i915_vma_unpin(ev->vma);
	ev->flags &= ~__EXEC_OBJECT_RESERVED;
}

static int
eb_validate_vma(struct i915_execbuffer *eb,
		struct drm_i915_gem_exec_object2 *entry,
		struct i915_vma *vma)
{
	/* Relocations are disallowed for all platforms after TGL-LP.  This
	 * also covers all platforms with local memory.
	 */
	if (entry->relocation_count &&
	    GRAPHICS_VER(eb->i915) >= 12 && !IS_TIGERLAKE(eb->i915))
		return -EINVAL;

	if (unlikely(entry->flags & eb->invalid_flags))
		return -EINVAL;

	if (unlikely(entry->alignment &&
		     !is_power_of_2_u64(entry->alignment)))
		return -EINVAL;

	/*
	 * Offset can be used as input (EXEC_OBJECT_PINNED), reject
	 * any non-page-aligned or non-canonical addresses.
	 */
	if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
		     entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
		return -EINVAL;

	/* pad_to_size was once a reserved field, so sanitize it */
	if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
		if (unlikely(offset_in_page(entry->pad_to_size)))
			return -EINVAL;
	} else {
		entry->pad_to_size = 0;
	}
	/*
	 * From drm_mm perspective address space is continuous,
	 * so from this point we're always using non-canonical
	 * form internally.
	 */
	entry->offset = gen8_noncanonical_addr(entry->offset);

	if (!eb->reloc_cache.has_fence) {
		entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
	} else {
		if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
		     eb->reloc_cache.needs_unfenced) &&
		    i915_gem_object_is_tiled(vma->obj))
			entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
	}

	return 0;
}
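
/*
 * Worked example (informative): offsets cross the uapi boundary in
 * canonical form, with bit 47 sign-extended through bits 63:48. A
 * pinned object at 0x8000_0000_0000 must therefore be passed in as
 * 0xffff_8000_0000_0000; gen8_noncanonical_addr() masks that back to
 * 0x8000_0000_0000 for drm_mm, which works on a flat address space.
 */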

static inline bool
is_batch_buffer(struct i915_execbuffer *eb, unsigned int buffer_idx)
{
	return eb->args->flags & I915_EXEC_BATCH_FIRST ?
		buffer_idx < eb->num_batches :
		buffer_idx >= eb->args->buffer_count - eb->num_batches;
}

static int
eb_add_vma(struct i915_execbuffer *eb,
	   unsigned int *current_batch,
	   unsigned int i,
	   struct i915_vma *vma)
{
	struct drm_i915_private *i915 = eb->i915;
	struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
	struct eb_vma *ev = &eb->vma[i];

	ev->vma = vma;
	ev->exec = entry;
	ev->flags = entry->flags;

	if (eb->lut_size > 0) {
		ev->handle = entry->handle;
		hlist_add_head(&ev->node,
			       &eb->buckets[hash_32(entry->handle,
						    eb->lut_size)]);
	}

	if (entry->relocation_count)
		list_add_tail(&ev->reloc_link, &eb->relocs);

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if (is_batch_buffer(eb, i)) {
		if (entry->relocation_count &&
		    !(ev->flags & EXEC_OBJECT_PINNED))
			ev->flags |= __EXEC_OBJECT_NEEDS_BIAS;
		if (eb->reloc_cache.has_fence)
			ev->flags |= EXEC_OBJECT_NEEDS_FENCE;

		eb->batches[*current_batch] = ev;

		if (unlikely(ev->flags & EXEC_OBJECT_WRITE)) {
			drm_dbg(&i915->drm,
				"Attempting to use self-modifying batch buffer\n");
			return -EINVAL;
		}

		if (range_overflows_t(u64,
				      eb->batch_start_offset,
				      eb->args->batch_len,
				      ev->vma->size)) {
			drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
			return -EINVAL;
		}

		if (eb->args->batch_len == 0)
			eb->batch_len[*current_batch] = ev->vma->size -
				eb->batch_start_offset;
		else
			eb->batch_len[*current_batch] = eb->args->batch_len;
		if (unlikely(eb->batch_len[*current_batch] == 0)) { /* impossible! */
			drm_dbg(&i915->drm, "Invalid batch length\n");
			return -EINVAL;
		}

		++*current_batch;
	}

	return 0;
}

static inline int use_cpu_reloc(const struct reloc_cache *cache,
				const struct drm_i915_gem_object *obj)
{
	if (!i915_gem_object_has_struct_page(obj))
		return false;

	if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
		return true;

	if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
		return false;

	return (cache->has_llc ||
		obj->cache_dirty ||
		obj->cache_level != I915_CACHE_NONE);
}

static int eb_reserve_vma(struct i915_execbuffer *eb,
			  struct eb_vma *ev,
			  u64 pin_flags)
{
	struct drm_i915_gem_exec_object2 *entry = ev->exec;
	struct i915_vma *vma = ev->vma;
	int err;

	if (drm_mm_node_allocated(&vma->node) &&
	    eb_vma_misplaced(entry, vma, ev->flags)) {
		err = i915_vma_unbind(vma);
		if (err)
			return err;
	}

	err = i915_vma_pin_ww(vma, &eb->ww,
			   entry->pad_to_size, entry->alignment,
			   eb_pin_flags(entry, ev->flags) | pin_flags);
	if (err)
		return err;

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start | UPDATE;
		eb->args->flags |= __EXEC_HAS_RELOC;
	}

	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
		err = i915_vma_pin_fence(vma);
		if (unlikely(err)) {
			i915_vma_unpin(vma);
			return err;
		}

		if (vma->fence)
			ev->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	ev->flags |= __EXEC_OBJECT_HAS_PIN;
	GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags));

	return 0;
}

static int eb_reserve(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int pin_flags = PIN_USER | PIN_NONBLOCK;
	struct list_head last;
	struct eb_vma *ev;
	unsigned int i, pass;
	int err = 0;

	/*
	 * Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoid unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	pass = 0;
	do {
		list_for_each_entry(ev, &eb->unbound, bind_link) {
			err = eb_reserve_vma(eb, ev, pin_flags);
			if (err)
				break;
		}
		if (err != -ENOSPC)
			return err;

		/* Resort *all* the objects into priority order */
		INIT_LIST_HEAD(&eb->unbound);
		INIT_LIST_HEAD(&last);
		for (i = 0; i < count; i++) {
			unsigned int flags;

			ev = &eb->vma[i];
			flags = ev->flags;
			if (flags & EXEC_OBJECT_PINNED &&
			    flags & __EXEC_OBJECT_HAS_PIN)
				continue;

			eb_unreserve_vma(ev);

			if (flags & EXEC_OBJECT_PINNED)
				/* Pinned must have their slot */
				list_add(&ev->bind_link, &eb->unbound);
			else if (flags & __EXEC_OBJECT_NEEDS_MAP)
				/* Map require the lowest 256MiB (aperture) */
				list_add_tail(&ev->bind_link, &eb->unbound);
			else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
				/* Prioritise 4GiB region for restricted bo */
				list_add(&ev->bind_link, &last);
			else
				list_add_tail(&ev->bind_link, &last);
		}
		list_splice_tail(&last, &eb->unbound);

		switch (pass++) {
		case 0:
			break;

		case 1:
			/* Too fragmented, unbind everything and retry */
			mutex_lock(&eb->context->vm->mutex);
			err = i915_gem_evict_vm(eb->context->vm);
			mutex_unlock(&eb->context->vm->mutex);
			if (err)
				return err;
			break;

		default:
			return -ENOSPC;
		}

		pin_flags = PIN_USER;
	} while (1);
}

static int eb_select_context(struct i915_execbuffer *eb)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
	if (unlikely(IS_ERR(ctx)))
		return PTR_ERR(ctx);

	eb->gem_context = ctx;
	if (i915_gem_context_has_full_ppgtt(ctx))
		eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;

	return 0;
}

static int __eb_add_lut(struct i915_execbuffer *eb,
			u32 handle, struct i915_vma *vma)
{
	struct i915_gem_context *ctx = eb->gem_context;
	struct i915_lut_handle *lut;
	int err;

	lut = i915_lut_handle_alloc();
	if (unlikely(!lut))
		return -ENOMEM;

	i915_vma_get(vma);
	if (!atomic_fetch_inc(&vma->open_count))
		i915_vma_reopen(vma);
	lut->handle = handle;
	lut->ctx = ctx;

	/* Check that the context hasn't been closed in the meantime */
	err = -EINTR;
	if (!mutex_lock_interruptible(&ctx->lut_mutex)) {
		if (likely(!i915_gem_context_is_closed(ctx)))
			err = radix_tree_insert(&ctx->handles_vma, handle, vma);
		else
			err = -ENOENT;
		if (err == 0) { /* And nor has this handle */
			struct drm_i915_gem_object *obj = vma->obj;

			spin_lock(&obj->lut_lock);
			if (idr_find(&eb->file->object_idr, handle) == obj) {
				list_add(&lut->obj_link, &obj->lut_list);
			} else {
				radix_tree_delete(&ctx->handles_vma, handle);
				err = -ENOENT;
			}
			spin_unlock(&obj->lut_lock);
		}
		mutex_unlock(&ctx->lut_mutex);
	}
	if (unlikely(err))
		goto err;

	return 0;

err:
	i915_vma_close(vma);
	i915_vma_put(vma);
	i915_lut_handle_free(lut);
	return err;
}

static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
{
	struct i915_address_space *vm = eb->context->vm;

	do {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;
		int err;

		rcu_read_lock();
		vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle);
		if (likely(vma && vma->vm == vm))
			vma = i915_vma_tryget(vma);
		rcu_read_unlock();
		if (likely(vma))
			return vma;

		obj = i915_gem_object_lookup(eb->file, handle);
		if (unlikely(!obj))
			return ERR_PTR(-ENOENT);

		/*
		 * If the user has opted-in for protected-object tracking, make
		 * sure the object encryption can be used.
		 * We only need to do this when the object is first used with
		 * this context, because the context itself will be banned when
		 * the protected objects become invalid.
		 */
		if (i915_gem_context_uses_protected_content(eb->gem_context) &&
		    i915_gem_object_is_protected(obj)) {
			err = intel_pxp_key_check(&vm->gt->pxp, obj, true);
			if (err) {
				i915_gem_object_put(obj);
				return ERR_PTR(err);
			}
		}

		vma = i915_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			i915_gem_object_put(obj);
			return vma;
		}

		err = __eb_add_lut(eb, handle, vma);
		if (likely(!err))
			return vma;

		i915_gem_object_put(obj);
		if (err != -EEXIST)
			return ERR_PTR(err);
	} while (1);
}

static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
	unsigned int i, current_batch = 0;
	int err = 0;

	INIT_LIST_HEAD(&eb->relocs);

	for (i = 0; i < eb->buffer_count; i++) {
		struct i915_vma *vma;

		vma = eb_lookup_vma(eb, eb->exec[i].handle);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err;
		}

		err = eb_validate_vma(eb, &eb->exec[i], vma);
		if (unlikely(err)) {
			i915_vma_put(vma);
			goto err;
		}

		err = eb_add_vma(eb, &current_batch, i, vma);
		if (err)
			return err;

		if (i915_gem_object_is_userptr(vma->obj)) {
			err = i915_gem_object_userptr_submit_init(vma->obj);
			if (err) {
				if (i + 1 < eb->buffer_count) {
					/*
					 * Execbuffer code expects last vma entry to be NULL,
					 * since we already initialized this entry,
					 * set the next value to NULL or we mess up
					 * cleanup handling.
					 */
					eb->vma[i + 1].vma = NULL;
				}

				return err;
			}

			eb->vma[i].flags |= __EXEC_OBJECT_USERPTR_INIT;
			eb->args->flags |= __EXEC_USERPTR_USED;
		}
	}

	return 0;

err:
	eb->vma[i].vma = NULL;
	return err;
}

static int eb_lock_vmas(struct i915_execbuffer *eb)
{
	unsigned int i;
	int err;

	for (i = 0; i < eb->buffer_count; i++) {
		struct eb_vma *ev = &eb->vma[i];
		struct i915_vma *vma = ev->vma;

		err = i915_gem_object_lock(vma->obj, &eb->ww);
		if (err)
			return err;
	}

	return 0;
}

static int eb_validate_vmas(struct i915_execbuffer *eb)
{
	unsigned int i;
	int err;

	INIT_LIST_HEAD(&eb->unbound);

	err = eb_lock_vmas(eb);
	if (err)
		return err;

	for (i = 0; i < eb->buffer_count; i++) {
		struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
		struct eb_vma *ev = &eb->vma[i];
		struct i915_vma *vma = ev->vma;

		err = eb_pin_vma(eb, entry, ev);
		if (err == -EDEADLK)
			return err;

		if (!err) {
			if (entry->offset != vma->node.start) {
				entry->offset = vma->node.start | UPDATE;
				eb->args->flags |= __EXEC_HAS_RELOC;
			}
		} else {
			eb_unreserve_vma(ev);

			list_add_tail(&ev->bind_link, &eb->unbound);
			if (drm_mm_node_allocated(&vma->node)) {
				err = i915_vma_unbind(vma);
				if (err)
					return err;
			}
		}

		if (!(ev->flags & EXEC_OBJECT_WRITE)) {
			err = dma_resv_reserve_shared(vma->obj->base.resv, 1);
			if (err)
				return err;
		}

		GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
			   eb_vma_misplaced(&eb->exec[i], vma, ev->flags));
	}

	if (!list_empty(&eb->unbound))
		return eb_reserve(eb);

	return 0;
}

static struct eb_vma *
eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
{
	if (eb->lut_size < 0) {
		if (handle >= -eb->lut_size)
			return NULL;
		return &eb->vma[handle];
	} else {
		struct hlist_head *head;
		struct eb_vma *ev;

		head = &eb->buckets[hash_32(handle, eb->lut_size)];
		hlist_for_each_entry(ev, head, node) {
			if (ev->handle == handle)
				return ev;
		}
		return NULL;
	}
}

static void eb_release_vmas(struct i915_execbuffer *eb, bool final)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;

	for (i = 0; i < count; i++) {
		struct eb_vma *ev = &eb->vma[i];
		struct i915_vma *vma = ev->vma;

		if (!vma)
			break;

		eb_unreserve_vma(ev);

		if (final)
			i915_vma_put(vma);
	}

	eb_capture_release(eb);
	eb_unpin_engine(eb);
}

static void eb_destroy(const struct i915_execbuffer *eb)
{
	if (eb->lut_size > 0)
		kfree(eb->buckets);
}

static inline u64
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
		  const struct i915_vma *target)
{
	return gen8_canonical_addr((int)reloc->delta + target->node.start);
}

static void reloc_cache_init(struct reloc_cache *cache,
			     struct drm_i915_private *i915)
{
	cache->page = -1;
	cache->vaddr = 0;
	/* Must be a variable in the struct to allow GCC to unroll. */
	cache->graphics_ver = GRAPHICS_VER(i915);
	cache->has_llc = HAS_LLC(i915);
	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
	cache->has_fence = cache->graphics_ver < 4;
	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
	cache->node.flags = 0;
}

static inline void *unmask_page(unsigned long p)
{
	return (void *)(uintptr_t)(p & PAGE_MASK);
}

static inline unsigned int unmask_flags(unsigned long p)
{
	return p & ~PAGE_MASK;
}

#define KMAP 0x4 /* after CLFLUSH_FLAGS */

static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
{
	struct drm_i915_private *i915 =
		container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
	return &i915->ggtt;
}

static void reloc_cache_unmap(struct reloc_cache *cache)
{
	void *vaddr;

	if (!cache->vaddr)
		return;

	vaddr = unmask_page(cache->vaddr);
	if (cache->vaddr & KMAP)
		kunmap_atomic(vaddr);
	else
		io_mapping_unmap_atomic((void __iomem *)vaddr);
}

static void reloc_cache_remap(struct reloc_cache *cache,
			      struct drm_i915_gem_object *obj)
{
	void *vaddr;

	if (!cache->vaddr)
		return;

	if (cache->vaddr & KMAP) {
		struct page *page = i915_gem_object_get_page(obj, cache->page);

		vaddr = kmap_atomic(page);
		cache->vaddr = unmask_flags(cache->vaddr) |
			(unsigned long)vaddr;
	} else {
		struct i915_ggtt *ggtt = cache_to_ggtt(cache);
		unsigned long offset;

		offset = cache->node.start;
		if (!drm_mm_node_allocated(&cache->node))
			offset += cache->page << PAGE_SHIFT;

		cache->vaddr = (unsigned long)
			io_mapping_map_atomic_wc(&ggtt->iomap, offset);
	}
}

static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer *eb)
{
	void *vaddr;

	if (!cache->vaddr)
		return;

	vaddr = unmask_page(cache->vaddr);
	if (cache->vaddr & KMAP) {
		struct drm_i915_gem_object *obj =
			(struct drm_i915_gem_object *)cache->node.mm;
		if (cache->vaddr & CLFLUSH_AFTER)
			mb();

		kunmap_atomic(vaddr);
		i915_gem_object_finish_access(obj);
	} else {
		struct i915_ggtt *ggtt = cache_to_ggtt(cache);

		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
		io_mapping_unmap_atomic((void __iomem *)vaddr);

		if (drm_mm_node_allocated(&cache->node)) {
			ggtt->vm.clear_range(&ggtt->vm,
					     cache->node.start,
					     cache->node.size);
			mutex_lock(&ggtt->vm.mutex);
			drm_mm_remove_node(&cache->node);
			mutex_unlock(&ggtt->vm.mutex);
		} else {
			i915_vma_unpin((struct i915_vma *)cache->node.mm);
		}
	}

	cache->vaddr = 0;
	cache->page = -1;
}

static void *reloc_kmap(struct drm_i915_gem_object *obj,
			struct reloc_cache *cache,
			unsigned long pageno)
{
	void *vaddr;
	struct page *page;

	if (cache->vaddr) {
		kunmap_atomic(unmask_page(cache->vaddr));
	} else {
		unsigned int flushes;
		int err;

		err = i915_gem_object_prepare_write(obj, &flushes);
		if (err)
			return ERR_PTR(err);

		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);

		cache->vaddr = flushes | KMAP;
		cache->node.mm = (void *)obj;
		if (flushes)
			mb();
	}

	page = i915_gem_object_get_page(obj, pageno);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	vaddr = kmap_atomic(page);
	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
	cache->page = pageno;

	return vaddr;
}

static void *reloc_iomap(struct drm_i915_gem_object *obj,
			 struct i915_execbuffer *eb,
			 unsigned long page)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	struct i915_ggtt *ggtt = cache_to_ggtt(cache);
	unsigned long offset;
	void *vaddr;

	if (cache->vaddr) {
		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
	} else {
		struct i915_vma *vma;
		int err;

		if (i915_gem_object_is_tiled(obj))
			return ERR_PTR(-EINVAL);

		if (use_cpu_reloc(cache, obj))
			return NULL;

		err = i915_gem_object_set_to_gtt_domain(obj, true);
		if (err)
			return ERR_PTR(err);

		vma = i915_gem_object_ggtt_pin_ww(obj, &eb->ww, NULL, 0, 0,
						  PIN_MAPPABLE |
						  PIN_NONBLOCK /* NOWARN */ |
						  PIN_NOEVICT);
		if (vma == ERR_PTR(-EDEADLK))
			return vma;

		if (IS_ERR(vma)) {
			memset(&cache->node, 0, sizeof(cache->node));
			mutex_lock(&ggtt->vm.mutex);
			err = drm_mm_insert_node_in_range
				(&ggtt->vm.mm, &cache->node,
				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
				 0, ggtt->mappable_end,
				 DRM_MM_INSERT_LOW);
			mutex_unlock(&ggtt->vm.mutex);
			if (err) /* no inactive aperture space, use cpu reloc */
				return NULL;
		} else {
			cache->node.start = vma->node.start;
			cache->node.mm = (void *)vma;
		}
	}

	offset = cache->node.start;
	if (drm_mm_node_allocated(&cache->node)) {
		ggtt->vm.insert_page(&ggtt->vm,
				     i915_gem_object_get_dma_address(obj, page),
				     offset, I915_CACHE_NONE, 0);
	} else {
		offset += page << PAGE_SHIFT;
	}

	vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
							 offset);
	cache->page = page;
	cache->vaddr = (unsigned long)vaddr;

	return vaddr;
}

static void *reloc_vaddr(struct drm_i915_gem_object *obj,
			 struct i915_execbuffer *eb,
			 unsigned long page)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	void *vaddr;

	if (cache->page == page) {
		vaddr = unmask_page(cache->vaddr);
	} else {
		vaddr = NULL;
		if ((cache->vaddr & KMAP) == 0)
			vaddr = reloc_iomap(obj, eb, page);
		if (!vaddr)
			vaddr = reloc_kmap(obj, cache, page);
	}

	return vaddr;
}

static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
{
	if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
		if (flushes & CLFLUSH_BEFORE) {
			clflushopt(addr);
			mb();
		}

		*addr = value;

		/*
		 * Writes to the same cacheline are serialised by the CPU
		 * (including clflush). On the write path, we only require
		 * that it hits memory in an orderly fashion and place
		 * mb barriers at the start and end of the relocation phase
		 * to ensure ordering of clflush wrt to the system.
		 */
		if (flushes & CLFLUSH_AFTER)
			clflushopt(addr);
	} else
		*addr = value;
}

static u64
relocate_entry(struct i915_vma *vma,
	       const struct drm_i915_gem_relocation_entry *reloc,
	       struct i915_execbuffer *eb,
	       const struct i915_vma *target)
{
	u64 target_addr = relocation_target(reloc, target);
	u64 offset = reloc->offset;
	bool wide = eb->reloc_cache.use_64bit_reloc;
	void *vaddr;

repeat:
	vaddr = reloc_vaddr(vma->obj, eb,
			    offset >> PAGE_SHIFT);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	GEM_BUG_ON(!IS_ALIGNED(offset, sizeof(u32)));
	clflush_write32(vaddr + offset_in_page(offset),
			lower_32_bits(target_addr),
			eb->reloc_cache.vaddr);

	if (wide) {
		offset += sizeof(u32);
		target_addr >>= 32;
		wide = false;
		goto repeat;
	}

	return target->node.start | UPDATE;
}
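
/*
 * Informative note on the copy loop above: a 64-bit relocation may
 * straddle a page boundary. With reloc->offset == 0xffc, the low dword
 * lands at byte 0xffc of one page and the high dword at byte 0x0 of
 * the next, which is why reloc_vaddr() is re-fetched for each dword.
 */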

static u64
eb_relocate_entry(struct i915_execbuffer *eb,
		  struct eb_vma *ev,
		  const struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_i915_private *i915 = eb->i915;
	struct eb_vma *target;
	int err;

	/* we already hold a reference to all valid objects */
	target = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(!target))
		return -ENOENT;

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		drm_dbg(&i915->drm, "reloc with multiple write domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		drm_dbg(&i915->drm, "reloc with read/write non-GPU domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	if (reloc->write_domain) {
		target->flags |= EXEC_OBJECT_WRITE;

		/*
		 * Sandybridge PPGTT errata: We need a global gtt mapping
		 * for MI and pipe_control writes because the gpu doesn't
		 * properly redirect them through the ppgtt for non_secure
		 * batchbuffers.
		 */
		if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
		    GRAPHICS_VER(eb->i915) == 6 &&
		    !i915_vma_is_bound(target->vma, I915_VMA_GLOBAL_BIND)) {
			struct i915_vma *vma = target->vma;

			reloc_cache_unmap(&eb->reloc_cache);
			mutex_lock(&vma->vm->mutex);
			err = i915_vma_bind(target->vma,
					    target->vma->obj->cache_level,
					    PIN_GLOBAL, NULL);
			mutex_unlock(&vma->vm->mutex);
			reloc_cache_remap(&eb->reloc_cache, ev->vma->obj);
			if (err)
				return err;
		}
	}

	/*
	 * If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (!DBG_FORCE_RELOC &&
	    gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		     ev->vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
		drm_dbg(&i915->drm, "Relocation beyond object bounds: "
			  "target %d offset %d size %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset,
			  (int)ev->vma->size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		drm_dbg(&i915->drm, "Relocation not 4-byte aligned: "
			  "target %d offset %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset);
		return -EINVAL;
	}

	/*
	 * If we write into the object, we need to force the synchronisation
	 * barrier, either with an asynchronous clflush or if we executed the
	 * patching using the GPU (though that should be serialised by the
	 * timeline). To be completely sure, and since we are required to
	 * do relocations we are already stalling, disable the user's opt
	 * out of our synchronisation.
	 */
	ev->flags &= ~EXEC_OBJECT_ASYNC;

	/* and update the user's relocation entry */
	return relocate_entry(ev->vma, reloc, eb, target->vma);
}

static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
	const struct drm_i915_gem_exec_object2 *entry = ev->exec;
	struct drm_i915_gem_relocation_entry __user *urelocs =
		u64_to_user_ptr(entry->relocs_ptr);
	unsigned long remain = entry->relocation_count;

	if (unlikely(remain > N_RELOC(ULONG_MAX)))
		return -EINVAL;

	/*
	 * We must check that the entire relocation array is safe
	 * to read. However, if the array is not writable the user loses
	 * the updated relocation values.
	 */
	if (unlikely(!access_ok(urelocs, remain * sizeof(*urelocs))))
		return -EFAULT;

	do {
		struct drm_i915_gem_relocation_entry *r = stack;
		unsigned int count =
			min_t(unsigned long, remain, ARRAY_SIZE(stack));
		unsigned int copied;

		/*
		 * This is the fast path and we cannot handle a pagefault
		 * whilst holding the struct mutex lest the user pass in the
		 * whilst holding the struct mutex lest the user pass in the
		 * relocations contained within a mmapped bo. In such a case
		 * the page fault handler would call i915_gem_fault() and
		 * we would try to acquire the struct mutex again. Obviously
		 * this is bad and so lockdep complains vehemently.
		 */
		pagefault_disable();
		copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
		pagefault_enable();
		if (unlikely(copied)) {
			remain = -EFAULT;
			goto out;
		}

		remain -= count;
		do {
			u64 offset = eb_relocate_entry(eb, ev, r);

			if (likely(offset == 0)) {
			} else if ((s64)offset < 0) {
				remain = (int)offset;
				goto out;
			} else {
				/*
				 * Note that reporting an error now
				 * leaves everything in an inconsistent
				 * state as we have *already* changed
				 * the relocation value inside the
				 * object. As we have not changed the
				 * reloc.presumed_offset or will not
				 * change the execobject.offset, on the
				 * call we may not rewrite the value
				 * inside the object, leaving it
				 * dangling and causing a GPU hang. Unless
				 * userspace dynamically rebuilds the
				 * relocations on each execbuf rather than
				 * presume a static tree.
				 *
				 * We did previously check if the relocations
				 * were writable (access_ok), an error now
				 * would be a strange race with mprotect,
				 * having already demonstrated that we
				 * can read from this userspace address.
				 */
				offset = gen8_canonical_addr(offset & ~UPDATE);
				__put_user(offset,
					   &urelocs[r - stack].presumed_offset);
			}
		} while (r++, --count);
		urelocs += ARRAY_SIZE(stack);
	} while (remain);
out:
	reloc_cache_reset(&eb->reloc_cache, eb);
	return remain;
}
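
/*
 * Sizing note (informative): struct drm_i915_gem_relocation_entry is
 * 32 bytes, so the on-stack staging array above holds
 * N_RELOC(512) == 16 entries per __copy_from_user_inatomic() chunk.
 */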

static int
eb_relocate_vma_slow(struct i915_execbuffer *eb, struct eb_vma *ev)
{
	const struct drm_i915_gem_exec_object2 *entry = ev->exec;
	struct drm_i915_gem_relocation_entry *relocs =
		u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
	unsigned int i;
	int err;

	for (i = 0; i < entry->relocation_count; i++) {
		u64 offset = eb_relocate_entry(eb, ev, &relocs[i]);

		if ((s64)offset < 0) {
			err = (int)offset;
			goto err;
		}
	}
	err = 0;
err:
	reloc_cache_reset(&eb->reloc_cache, eb);
	return err;
}

static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
{
	const char __user *addr, *end;
	unsigned long size;
	char __maybe_unused c;

	size = entry->relocation_count;
	if (size == 0)
		return 0;

	if (size > N_RELOC(ULONG_MAX))
		return -EINVAL;

	addr = u64_to_user_ptr(entry->relocs_ptr);
	size *= sizeof(struct drm_i915_gem_relocation_entry);
	if (!access_ok(addr, size))
		return -EFAULT;

	end = addr + size;
	for (; addr < end; addr += PAGE_SIZE) {
		int err = __get_user(c, addr);
		if (err)
			return err;
	}
	return __get_user(c, end - 1);
}

static int eb_copy_relocations(const struct i915_execbuffer *eb)
{
	struct drm_i915_gem_relocation_entry *relocs;
	const unsigned int count = eb->buffer_count;
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		const unsigned int nreloc = eb->exec[i].relocation_count;
		struct drm_i915_gem_relocation_entry __user *urelocs;
		unsigned long size;
		unsigned long copied;

		if (nreloc == 0)
			continue;

		err = check_relocations(&eb->exec[i]);
		if (err)
			goto err;

		urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
		size = nreloc * sizeof(*relocs);

		relocs = kvmalloc_array(size, 1, GFP_KERNEL);
		if (!relocs) {
			err = -ENOMEM;
			goto err;
		}

		/* copy_from_user is limited to < 4GiB */
		copied = 0;
		do {
			unsigned int len =
				min_t(u64, BIT_ULL(31), size - copied);

			if (__copy_from_user((char *)relocs + copied,
					     (char __user *)urelocs + copied,
					     len))
				goto end;

			copied += len;
		} while (copied < size);

		/*
		 * As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		if (!user_access_begin(urelocs, size))
			goto end;

		for (copied = 0; copied < nreloc; copied++)
			unsafe_put_user(-1,
					&urelocs[copied].presumed_offset,
					end_user);
		user_access_end();

		eb->exec[i].relocs_ptr = (uintptr_t)relocs;
	}

	return 0;

end_user:
	user_access_end();
end:
	kvfree(relocs);
	err = -EFAULT;
err:
	while (i--) {
		relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
		if (eb->exec[i].relocation_count)
			kvfree(relocs);
	}
	return err;
}

static int eb_prefault_relocations(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;

	for (i = 0; i < count; i++) {
		int err;

		err = check_relocations(&eb->exec[i]);
		if (err)
			return err;
	}

	return 0;
}

static int eb_reinit_userptr(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;
	int ret;

	if (likely(!(eb->args->flags & __EXEC_USERPTR_USED)))
		return 0;

	for (i = 0; i < count; i++) {
		struct eb_vma *ev = &eb->vma[i];

		if (!i915_gem_object_is_userptr(ev->vma->obj))
			continue;

		ret = i915_gem_object_userptr_submit_init(ev->vma->obj);
		if (ret)
			return ret;

		ev->flags |= __EXEC_OBJECT_USERPTR_INIT;
	}

	return 0;
}

static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb)
{
	bool have_copy = false;
	struct eb_vma *ev;
	int err = 0;

repeat:
	if (signal_pending(current)) {
		err = -ERESTARTSYS;
		goto out;
	}

	/* We may process another execbuffer during the unlock... */
	eb_release_vmas(eb, false);
	i915_gem_ww_ctx_fini(&eb->ww);

	/*
	 * We take 3 passes through the slowpath.
	 *
	 * 1 - we try to just prefault all the user relocation entries and
	 * then attempt to reuse the atomic pagefault disabled fast path again.
	 *
	 * 2 - we copy the user entries to a local buffer here outside of the
	 * lock and allow ourselves to wait upon any rendering before
	 * relocations
	 *
	 * 3 - we already have a local copy of the relocation entries, but
	 * were interrupted (EAGAIN) whilst waiting for the objects, try again.
	 */
	if (!err) {
		err = eb_prefault_relocations(eb);
	} else if (!have_copy) {
		err = eb_copy_relocations(eb);
		have_copy = err == 0;
	} else {
		cond_resched();
		err = 0;
	}

1757
	if (!err)
1758
		err = eb_reinit_userptr(eb);
1759

1760
	i915_gem_ww_ctx_init(&eb->ww, true);
1761 1762 1763
	if (err)
		goto out;

1764 1765
	/* reacquire the objects */
repeat_validate:
M
Matthew Brost 已提交
1766 1767
	err = eb_pin_engine(eb, false);
	if (err)
1768 1769
		goto err;

1770
	err = eb_validate_vmas(eb);
1771
	if (err)
1772 1773
		goto err;

M
Matthew Brost 已提交
1774
	GEM_BUG_ON(!eb->batches[0]);
1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787

	list_for_each_entry(ev, &eb->relocs, reloc_link) {
		if (!have_copy) {
			err = eb_relocate_vma(eb, ev);
			if (err)
				break;
		} else {
			err = eb_relocate_vma_slow(eb, ev);
			if (err)
				break;
		}
	}

1788 1789 1790
	if (err == -EDEADLK)
		goto err;

1791 1792 1793 1794 1795 1796
	if (err && !have_copy)
		goto repeat;

	if (err)
		goto err;

1797 1798 1799 1800 1801
	/* as last step, parse the command buffer */
	err = eb_parse(eb);
	if (err)
		goto err;

1802 1803 1804 1805 1806 1807 1808 1809
	/*
	 * Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
1810
	if (err == -EDEADLK) {
1811
		eb_release_vmas(eb, false);
1812 1813 1814 1815 1816
		err = i915_gem_ww_ctx_backoff(&eb->ww);
		if (!err)
			goto repeat_validate;
	}

1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840
	if (err == -EAGAIN)
		goto repeat;

out:
	if (have_copy) {
		const unsigned int count = eb->buffer_count;
		unsigned int i;

		for (i = 0; i < count; i++) {
			const struct drm_i915_gem_exec_object2 *entry =
				&eb->exec[i];
			struct drm_i915_gem_relocation_entry *relocs;

			if (!entry->relocation_count)
				continue;

			relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
			kvfree(relocs);
		}
	}

	return err;
}

1841
static int eb_relocate_parse(struct i915_execbuffer *eb)
1842
{
1843
	int err;
1844
	bool throttle = true;
1845

1846
retry:
M
Matthew Brost 已提交
1847 1848
	err = eb_pin_engine(eb, throttle);
	if (err) {
1849 1850 1851 1852 1853 1854 1855 1856 1857
		if (err != -EDEADLK)
			return err;

		goto err;
	}

	/* only throttle once, even if we didn't need to throttle */
	throttle = false;

1858 1859 1860 1861 1862
	err = eb_validate_vmas(eb);
	if (err == -EAGAIN)
		goto slow;
	else if (err)
		goto err;
1863 1864 1865

	/* The objects are in their final locations, apply the relocations. */
	if (eb->args->flags & __EXEC_HAS_RELOC) {
1866
		struct eb_vma *ev;
1867

1868
		list_for_each_entry(ev, &eb->relocs, reloc_link) {
1869 1870
			err = eb_relocate_vma(eb, ev);
			if (err)
1871
				break;
1872
		}
1873

1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884
		if (err == -EDEADLK)
			goto err;
		else if (err)
			goto slow;
	}

	if (!err)
		err = eb_parse(eb);

err:
	if (err == -EDEADLK) {
1885
		eb_release_vmas(eb, false);
1886 1887 1888
		err = i915_gem_ww_ctx_backoff(&eb->ww);
		if (!err)
			goto retry;
1889 1890
	}

1891 1892 1893
	return err;

slow:
M
Matthew Brost 已提交
1894
	err = eb_relocate_parse_slow(eb);
1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905
	if (err)
		/*
		 * If the user expects the execobject.offset and
		 * reloc.presumed_offset to be an exact match,
		 * as for using NO_RELOC, then we cannot update
		 * the execobject.offset until we have completed
		 * relocation.
		 */
		eb->args->flags &= ~__EXEC_HAS_RELOC;

	return err;
1906 1907
}

M
Matthew Brost 已提交
1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936
/*
 * Using two helper loops for the order of which requests / batches are created
 * and added the to backend. Requests are created in order from the parent to
 * the last child. Requests are added in the reverse order, from the last child
 * to parent. This is done for locking reasons as the timeline lock is acquired
 * during request creation and released when the request is added to the
 * backend. To make lockdep happy (see intel_context_timeline_lock) this must be
 * the ordering.
 */
#define for_each_batch_create_order(_eb, _i) \
	for ((_i) = 0; (_i) < (_eb)->num_batches; ++(_i))
#define for_each_batch_add_order(_eb, _i) \
	BUILD_BUG_ON(!typecheck(int, _i)); \
	for ((_i) = (_eb)->num_batches - 1; (_i) >= 0; --(_i))

static struct i915_request *
eb_find_first_request_added(struct i915_execbuffer *eb)
{
	int i;

	for_each_batch_add_order(eb, i)
		if (eb->requests[i])
			return eb->requests[i];

	GEM_BUG_ON("Request not found");

	return NULL;
}

1937 1938 1939 1940
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

/* Stage with GFP_KERNEL allocations before we enter the signaling critical path */
static void eb_capture_stage(struct i915_execbuffer *eb)
1941 1942
{
	const unsigned int count = eb->buffer_count;
1943 1944
	unsigned int i = count, j;
	struct i915_vma_snapshot *vsnap;
1945 1946

	while (i--) {
1947 1948 1949
		struct eb_vma *ev = &eb->vma[i];
		struct i915_vma *vma = ev->vma;
		unsigned int flags = ev->flags;
1950

1951 1952
		if (!(flags & EXEC_OBJECT_CAPTURE))
			continue;
1953

1954 1955 1956 1957 1958 1959
		vsnap = i915_vma_snapshot_alloc(GFP_KERNEL);
		if (!vsnap)
			continue;

		i915_vma_snapshot_init(vsnap, vma, "user");
		for_each_batch_create_order(eb, j) {
1960
			struct i915_capture_list *capture;
1961

1962 1963 1964
			capture = kmalloc(sizeof(*capture), GFP_KERNEL);
			if (!capture)
				continue;
M
Matthew Brost 已提交
1965

1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001
			capture->next = eb->capture_lists[j];
			capture->vma_snapshot = i915_vma_snapshot_get(vsnap);
			eb->capture_lists[j] = capture;
		}
		i915_vma_snapshot_put(vsnap);
	}
}

/* Commit once we're in the critical path */
static void eb_capture_commit(struct i915_execbuffer *eb)
{
	unsigned int j;

	for_each_batch_create_order(eb, j) {
		struct i915_request *rq = eb->requests[j];

		if (!rq)
			break;

		rq->capture_list = eb->capture_lists[j];
		eb->capture_lists[j] = NULL;
	}
}

/*
 * Release anything that didn't get committed due to errors.
 * The capture_list will otherwise be freed at request retire.
 */
static void eb_capture_release(struct i915_execbuffer *eb)
{
	unsigned int j;

	for_each_batch_create_order(eb, j) {
		if (eb->capture_lists[j]) {
			i915_request_free_capture_list(eb->capture_lists[j]);
			eb->capture_lists[j] = NULL;
2002
		}
2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043
	}
}

static void eb_capture_list_clear(struct i915_execbuffer *eb)
{
	memset(eb->capture_lists, 0, sizeof(eb->capture_lists));
}

#else

static void eb_capture_stage(struct i915_execbuffer *eb)
{
}

static void eb_capture_commit(struct i915_execbuffer *eb)
{
}

static void eb_capture_release(struct i915_execbuffer *eb)
{
}

static void eb_capture_list_clear(struct i915_execbuffer *eb)
{
}

#endif

static int eb_move_to_gpu(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i = count;
	int err = 0, j;

	while (i--) {
		struct eb_vma *ev = &eb->vma[i];
		struct i915_vma *vma = ev->vma;
		unsigned int flags = ev->flags;
		struct drm_i915_gem_object *obj = vma->obj;

		assert_vma_held(vma);
2044

2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055
		/*
		 * If the GPU is not _reading_ through the CPU cache, we need
		 * to make sure that any writes (both previous GPU writes from
		 * before a change in snooping levels and normal CPU writes)
		 * caught in that cache are flushed to main memory.
		 *
		 * We want to say
		 *   obj->cache_dirty &&
		 *   !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		 * but gcc's optimiser doesn't handle that as well and emits
		 * two jumps instead of one. Maybe one day...
2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066
		 *
		 * FIXME: There is also sync flushing in set_pages(), which
		 * serves a different purpose(some of the time at least).
		 *
		 * We should consider:
		 *
		 *   1. Rip out the async flush code.
		 *
		 *   2. Or make the sync flushing use the async clflush path
		 *   using mandatory fences underneath. Currently the below
		 *   async flush happens after we bind the object.
2067 2068
		 */
		if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) {
2069
			if (i915_gem_clflush_object(obj, 0))
2070
				flags &= ~EXEC_OBJECT_ASYNC;
2071 2072
		}

M
Matthew Brost 已提交
2073
		/* We only need to await on the first request */
2074 2075
		if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) {
			err = i915_request_await_object
M
Matthew Brost 已提交
2076 2077
				(eb_find_first_request_added(eb), obj,
				 flags & EXEC_OBJECT_WRITE);
2078
		}
2079

M
Matthew Brost 已提交
2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092
		for_each_batch_add_order(eb, j) {
			if (err)
				break;
			if (!eb->requests[j])
				continue;

			err = _i915_vma_move_to_active(vma, eb->requests[j],
						       j ? NULL :
						       eb->composite_fence ?
						       eb->composite_fence :
						       &eb->requests[j]->fence,
						       flags | __EXEC_OBJECT_NO_RESERVE);
		}
2093
	}
2094

2095 2096
#ifdef CONFIG_MMU_NOTIFIER
	if (!err && (eb->args->flags & __EXEC_USERPTR_USED)) {
2097
		read_lock(&eb->i915->mm.notifier_lock);
2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114

		/*
		 * count is always at least 1, otherwise __EXEC_USERPTR_USED
		 * could not have been set
		 */
		for (i = 0; i < count; i++) {
			struct eb_vma *ev = &eb->vma[i];
			struct drm_i915_gem_object *obj = ev->vma->obj;

			if (!i915_gem_object_is_userptr(obj))
				continue;

			err = i915_gem_object_userptr_submit_done(obj);
			if (err)
				break;
		}

2115
		read_unlock(&eb->i915->mm.notifier_lock);
2116 2117 2118
	}
#endif

2119 2120 2121
	if (unlikely(err))
		goto err_skip;

2122
	/* Unconditionally flush any chipset caches (for streaming writes). */
M
Matthew Brost 已提交
2123
	intel_gt_chipset_flush(eb->gt);
2124 2125
	eb_capture_commit(eb);

2126
	return 0;
2127 2128

err_skip:
M
Matthew Brost 已提交
2129 2130 2131 2132 2133 2134
	for_each_batch_create_order(eb, j) {
		if (!eb->requests[j])
			break;

		i915_request_set_error_once(eb->requests[j], err);
	}
2135
	return err;
2136 2137
}

T
Tvrtko Ursulin 已提交
2138
static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
2139
{
2140
	if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
T
Tvrtko Ursulin 已提交
2141
		return -EINVAL;
2142

C
Chris Wilson 已提交
2143
	/* Kernel clipping was a DRI1 misfeature */
2144 2145
	if (!(exec->flags & (I915_EXEC_FENCE_ARRAY |
			     I915_EXEC_USE_EXTENSIONS))) {
2146
		if (exec->num_cliprects || exec->cliprects_ptr)
T
Tvrtko Ursulin 已提交
2147
			return -EINVAL;
2148
	}
C
Chris Wilson 已提交
2149 2150 2151 2152 2153 2154

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
T
Tvrtko Ursulin 已提交
2155
		return -EINVAL;
C
Chris Wilson 已提交
2156 2157

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
T
Tvrtko Ursulin 已提交
2158
		return -EINVAL;
C
Chris Wilson 已提交
2159

T
Tvrtko Ursulin 已提交
2160
	return 0;
2161 2162
}

2163
static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
2164
{
2165 2166
	u32 *cs;
	int i;
2167

2168
	if (GRAPHICS_VER(rq->engine->i915) != 7 || rq->engine->id != RCS0) {
2169
		drm_dbg(&rq->engine->i915->drm, "sol reset is gen7/rcs only\n");
2170 2171
		return -EINVAL;
	}
2172

2173
	cs = intel_ring_begin(rq, 4 * 2 + 2);
2174 2175
	if (IS_ERR(cs))
		return PTR_ERR(cs);
2176

2177
	*cs++ = MI_LOAD_REGISTER_IMM(4);
2178
	for (i = 0; i < 4; i++) {
2179 2180
		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
		*cs++ = 0;
2181
	}
2182
	*cs++ = MI_NOOP;
2183
	intel_ring_advance(rq, cs);
2184 2185 2186 2187

	return 0;
}

2188
static struct i915_vma *
2189 2190
shadow_batch_pin(struct i915_execbuffer *eb,
		 struct drm_i915_gem_object *obj,
2191 2192
		 struct i915_address_space *vm,
		 unsigned int flags)
2193
{
2194 2195
	struct i915_vma *vma;
	int err;
2196

2197 2198 2199 2200
	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		return vma;

2201
	err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, flags);
2202 2203 2204 2205
	if (err)
		return ERR_PTR(err);

	return vma;
2206 2207
}

2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219
static struct i915_vma *eb_dispatch_secure(struct i915_execbuffer *eb, struct i915_vma *vma)
{
	/*
	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (eb->batch_flags & I915_DISPATCH_SECURE)
		return i915_gem_object_ggtt_pin_ww(vma->obj, &eb->ww, NULL, 0, 0, 0);

	return NULL;
}

2220
static int eb_parse(struct i915_execbuffer *eb)
2221
{
2222
	struct drm_i915_private *i915 = eb->i915;
2223
	struct intel_gt_buffer_pool_node *pool = eb->batch_pool;
2224
	struct i915_vma *shadow, *trampoline, *batch;
2225
	unsigned long len;
2226
	int err;
2227

2228
	if (!eb_use_cmdparser(eb)) {
M
Matthew Brost 已提交
2229
		batch = eb_dispatch_secure(eb, eb->batches[0]->vma);
2230 2231 2232 2233 2234
		if (IS_ERR(batch))
			return PTR_ERR(batch);

		goto secure_batch;
	}
2235

M
Matthew Brost 已提交
2236 2237 2238 2239
	if (intel_context_is_parallel(eb->context))
		return -EINVAL;

	len = eb->batch_len[0];
2240 2241 2242 2243 2244 2245
	if (!CMDPARSER_USES_GGTT(eb->i915)) {
		/*
		 * ppGTT backed shadow buffers must be mapped RO, to prevent
		 * post-scan tampering
		 */
		if (!eb->context->vm->has_read_only) {
2246 2247
			drm_dbg(&i915->drm,
				"Cannot prevent post-scan tampering without RO capable vm\n");
2248 2249 2250 2251 2252
			return -EINVAL;
		}
	} else {
		len += I915_CMD_PARSER_TRAMPOLINE_SIZE;
	}
M
Matthew Brost 已提交
2253
	if (unlikely(len < eb->batch_len[0])) /* last paranoid check of overflow */
2254
		return -EINVAL;
2255

2256
	if (!pool) {
M
Matthew Brost 已提交
2257
		pool = intel_gt_get_buffer_pool(eb->gt, len,
2258
						I915_MAP_WB);
2259 2260 2261 2262
		if (IS_ERR(pool))
			return PTR_ERR(pool);
		eb->batch_pool = pool;
	}
2263

2264 2265 2266
	err = i915_gem_object_lock(pool->obj, &eb->ww);
	if (err)
		goto err;
2267

2268
	shadow = shadow_batch_pin(eb, pool->obj, eb->context->vm, PIN_USER);
2269 2270
	if (IS_ERR(shadow)) {
		err = PTR_ERR(shadow);
2271
		goto err;
2272
	}
2273
	intel_gt_buffer_pool_mark_used(pool);
2274
	i915_gem_object_set_readonly(shadow->obj);
2275
	shadow->private = pool;
2276 2277 2278 2279 2280

	trampoline = NULL;
	if (CMDPARSER_USES_GGTT(eb->i915)) {
		trampoline = shadow;

2281
		shadow = shadow_batch_pin(eb, pool->obj,
M
Matthew Brost 已提交
2282
					  &eb->gt->ggtt->vm,
2283 2284 2285 2286 2287 2288
					  PIN_GLOBAL);
		if (IS_ERR(shadow)) {
			err = PTR_ERR(shadow);
			shadow = trampoline;
			goto err_shadow;
		}
2289
		shadow->private = pool;
2290 2291 2292

		eb->batch_flags |= I915_DISPATCH_SECURE;
	}
2293

2294 2295 2296 2297 2298 2299
	batch = eb_dispatch_secure(eb, shadow);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_trampoline;
	}

2300
	err = dma_resv_reserve_shared(shadow->obj->base.resv, 1);
2301 2302 2303
	if (err)
		goto err_trampoline;

M
Matthew Brost 已提交
2304 2305
	err = intel_engine_cmd_parser(eb->context->engine,
				      eb->batches[0]->vma,
2306
				      eb->batch_start_offset,
M
Matthew Brost 已提交
2307
				      eb->batch_len[0],
2308
				      shadow, trampoline);
2309
	if (err)
2310
		goto err_unpin_batch;
2311

M
Matthew Brost 已提交
2312 2313 2314
	eb->batches[0] = &eb->vma[eb->buffer_count++];
	eb->batches[0]->vma = i915_vma_get(shadow);
	eb->batches[0]->flags = __EXEC_OBJECT_HAS_PIN;
2315

2316
	eb->trampoline = trampoline;
2317 2318
	eb->batch_start_offset = 0;

2319 2320
secure_batch:
	if (batch) {
M
Matthew Brost 已提交
2321 2322 2323 2324 2325 2326
		if (intel_context_is_parallel(eb->context))
			return -EINVAL;

		eb->batches[0] = &eb->vma[eb->buffer_count++];
		eb->batches[0]->flags = __EXEC_OBJECT_HAS_PIN;
		eb->batches[0]->vma = i915_vma_get(batch);
2327
	}
2328
	return 0;
2329

2330 2331 2332
err_unpin_batch:
	if (batch)
		i915_vma_unpin(batch);
2333 2334 2335 2336 2337
err_trampoline:
	if (trampoline)
		i915_vma_unpin(trampoline);
err_shadow:
	i915_vma_unpin(shadow);
2338
err:
2339
	return err;
2340
}
2341

M
Matthew Brost 已提交
2342 2343 2344 2345
static int eb_request_submit(struct i915_execbuffer *eb,
			     struct i915_request *rq,
			     struct i915_vma *batch,
			     u64 batch_len)
2346
{
2347
	int err;
2348

M
Matthew Brost 已提交
2349 2350
	if (intel_context_nopreempt(rq->context))
		__set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);
2351

2352
	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
M
Matthew Brost 已提交
2353
		err = i915_reset_gen7_sol_offsets(rq);
2354 2355
		if (err)
			return err;
2356 2357
	}

2358 2359 2360 2361 2362 2363
	/*
	 * After we completed waiting for other engines (using HW semaphores)
	 * then we can signal that this request/batch is ready to run. This
	 * allows us to determine if the batch is still waiting on the GPU
	 * or actually running by checking the breadcrumb.
	 */
M
Matthew Brost 已提交
2364 2365
	if (rq->context->engine->emit_init_breadcrumb) {
		err = rq->context->engine->emit_init_breadcrumb(rq);
2366 2367 2368 2369
		if (err)
			return err;
	}

M
Matthew Brost 已提交
2370 2371 2372 2373 2374
	err = rq->context->engine->emit_bb_start(rq,
						 batch->node.start +
						 eb->batch_start_offset,
						 batch_len,
						 eb->batch_flags);
2375 2376
	if (err)
		return err;
2377

2378
	if (eb->trampoline) {
M
Matthew Brost 已提交
2379
		GEM_BUG_ON(intel_context_is_parallel(rq->context));
2380
		GEM_BUG_ON(eb->batch_start_offset);
M
Matthew Brost 已提交
2381 2382 2383
		err = rq->context->engine->emit_bb_start(rq,
							 eb->trampoline->node.start +
							 batch_len, 0, 0);
2384 2385 2386 2387
		if (err)
			return err;
	}

C
Chris Wilson 已提交
2388
	return 0;
2389 2390
}

M
Matthew Brost 已提交
2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411
static int eb_submit(struct i915_execbuffer *eb)
{
	unsigned int i;
	int err;

	err = eb_move_to_gpu(eb);

	for_each_batch_create_order(eb, i) {
		if (!eb->requests[i])
			break;

		trace_i915_request_queue(eb->requests[i], eb->batch_flags);
		if (!err)
			err = eb_request_submit(eb, eb->requests[i],
						eb->batches[i]->vma,
						eb->batch_len[i]);
	}

	return err;
}

2412
static int num_vcs_engines(struct drm_i915_private *i915)
2413
{
2414
	return hweight_long(VDBOX_MASK(to_gt(i915)));
2415 2416
}

2417
/*
2418
 * Find one BSD ring to dispatch the corresponding BSD command.
2419
 * The engine index is returned.
2420
 */
2421
static unsigned int
2422 2423
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
			 struct drm_file *file)
2424 2425 2426
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

2427
	/* Check whether the file_priv has already selected one ring. */
2428
	if ((int)file_priv->bsd_engine < 0)
2429 2430
		file_priv->bsd_engine =
			get_random_int() % num_vcs_engines(dev_priv);
2431

2432
	return file_priv->bsd_engine;
2433 2434
}

2435
static const enum intel_engine_id user_ring_map[] = {
2436 2437 2438 2439 2440
	[I915_EXEC_DEFAULT]	= RCS0,
	[I915_EXEC_RENDER]	= RCS0,
	[I915_EXEC_BLT]		= BCS0,
	[I915_EXEC_BSD]		= VCS0,
	[I915_EXEC_VEBOX]	= VECS0
2441 2442
};

2443
static struct i915_request *eb_throttle(struct i915_execbuffer *eb, struct intel_context *ce)
2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476
{
	struct intel_ring *ring = ce->ring;
	struct intel_timeline *tl = ce->timeline;
	struct i915_request *rq;

	/*
	 * Completely unscientific finger-in-the-air estimates for suitable
	 * maximum user request size (to avoid blocking) and then backoff.
	 */
	if (intel_ring_update_space(ring) >= PAGE_SIZE)
		return NULL;

	/*
	 * Find a request that after waiting upon, there will be at least half
	 * the ring available. The hysteresis allows us to compete for the
	 * shared ring and should mean that we sleep less often prior to
	 * claiming our resources, but not so long that the ring completely
	 * drains before we can submit our next request.
	 */
	list_for_each_entry(rq, &tl->requests, link) {
		if (rq->ring != ring)
			continue;

		if (__intel_ring_space(rq->postfix,
				       ring->emit, ring->size) > ring->size / 2)
			break;
	}
	if (&rq->link == &tl->requests)
		return NULL; /* weird, we will check again later for real */

	return i915_request_get(rq);
}

M
Matthew Brost 已提交
2477 2478
static int eb_pin_timeline(struct i915_execbuffer *eb, struct intel_context *ce,
			   bool throttle)
2479 2480
{
	struct intel_timeline *tl;
2481
	struct i915_request *rq = NULL;
2482

2483 2484 2485 2486 2487 2488 2489 2490
	/*
	 * Take a local wakeref for preparing to dispatch the execbuf as
	 * we expect to access the hardware fairly frequently in the
	 * process, and require the engine to be kept awake between accesses.
	 * Upon dispatch, we acquire another prolonged wakeref that we hold
	 * until the timeline is idle, which in turn releases the wakeref
	 * taken on the engine, and the parent device.
	 */
2491
	tl = intel_context_timeline_lock(ce);
M
Matthew Brost 已提交
2492 2493
	if (IS_ERR(tl))
		return PTR_ERR(tl);
2494 2495

	intel_context_enter(ce);
2496 2497
	if (throttle)
		rq = eb_throttle(eb, ce);
2498 2499
	intel_context_timeline_unlock(tl);

M
Matthew Brost 已提交
2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556
	if (rq) {
		bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
		long timeout = nonblock ? 0 : MAX_SCHEDULE_TIMEOUT;

		if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
				      timeout) < 0) {
			i915_request_put(rq);

			tl = intel_context_timeline_lock(ce);
			intel_context_exit(ce);
			intel_context_timeline_unlock(tl);

			if (nonblock)
				return -EWOULDBLOCK;
			else
				return -EINTR;
		}
		i915_request_put(rq);
	}

	return 0;
}

static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle)
{
	struct intel_context *ce = eb->context, *child;
	int err;
	int i = 0, j = 0;

	GEM_BUG_ON(eb->args->flags & __EXEC_ENGINE_PINNED);

	if (unlikely(intel_context_is_banned(ce)))
		return -EIO;

	/*
	 * Pinning the contexts may generate requests in order to acquire
	 * GGTT space, so do this first before we reserve a seqno for
	 * ourselves.
	 */
	err = intel_context_pin_ww(ce, &eb->ww);
	if (err)
		return err;
	for_each_child(ce, child) {
		err = intel_context_pin_ww(child, &eb->ww);
		GEM_BUG_ON(err);	/* perma-pinned should incr a counter */
	}

	for_each_child(ce, child) {
		err = eb_pin_timeline(eb, child, throttle);
		if (err)
			goto unwind;
		++i;
	}
	err = eb_pin_timeline(eb, ce, throttle);
	if (err)
		goto unwind;

2557
	eb->args->flags |= __EXEC_ENGINE_PINNED;
M
Matthew Brost 已提交
2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571
	return 0;

unwind:
	for_each_child(ce, child) {
		if (j++ < i) {
			mutex_lock(&child->timeline->mutex);
			intel_context_exit(child);
			mutex_unlock(&child->timeline->mutex);
		}
	}
	for_each_child(ce, child)
		intel_context_unpin(child);
	intel_context_unpin(ce);
	return err;
2572 2573
}

2574
static void eb_unpin_engine(struct i915_execbuffer *eb)
2575
{
M
Matthew Brost 已提交
2576
	struct intel_context *ce = eb->context, *child;
2577

2578 2579 2580 2581 2582
	if (!(eb->args->flags & __EXEC_ENGINE_PINNED))
		return;

	eb->args->flags &= ~__EXEC_ENGINE_PINNED;

M
Matthew Brost 已提交
2583 2584 2585 2586 2587 2588 2589 2590 2591
	for_each_child(ce, child) {
		mutex_lock(&child->timeline->mutex);
		intel_context_exit(child);
		mutex_unlock(&child->timeline->mutex);

		intel_context_unpin(child);
	}

	mutex_lock(&ce->timeline->mutex);
2592
	intel_context_exit(ce);
M
Matthew Brost 已提交
2593
	mutex_unlock(&ce->timeline->mutex);
2594

2595
	intel_context_unpin(ce);
2596
}
2597

2598
static unsigned int
2599
eb_select_legacy_ring(struct i915_execbuffer *eb)
2600
{
2601
	struct drm_i915_private *i915 = eb->i915;
2602
	struct drm_i915_gem_execbuffer2 *args = eb->args;
2603 2604
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;

2605 2606
	if (user_ring_id != I915_EXEC_BSD &&
	    (args->flags & I915_EXEC_BSD_MASK)) {
2607 2608 2609
		drm_dbg(&i915->drm,
			"execbuf with non bsd ring but with invalid "
			"bsd dispatch flags: %d\n", (int)(args->flags));
2610
		return -1;
2611 2612
	}

2613
	if (user_ring_id == I915_EXEC_BSD && num_vcs_engines(i915) > 1) {
2614 2615 2616
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
2617
			bsd_idx = gen8_dispatch_bsd_engine(i915, eb->file);
2618 2619
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
2620
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
2621 2622
			bsd_idx--;
		} else {
2623 2624 2625
			drm_dbg(&i915->drm,
				"execbuf with unknown bsd ring: %u\n",
				bsd_idx);
2626
			return -1;
2627 2628
		}

2629
		return _VCS(bsd_idx);
2630 2631
	}

2632
	if (user_ring_id >= ARRAY_SIZE(user_ring_map)) {
2633 2634
		drm_dbg(&i915->drm, "execbuf with unknown ring: %u\n",
			user_ring_id);
2635
		return -1;
2636 2637
	}

2638 2639 2640 2641
	return user_ring_map[user_ring_id];
}

static int
2642
eb_select_engine(struct i915_execbuffer *eb)
2643
{
M
Matthew Brost 已提交
2644
	struct intel_context *ce, *child;
2645 2646 2647
	unsigned int idx;
	int err;

2648
	if (i915_gem_context_user_engines(eb->gem_context))
2649
		idx = eb->args->flags & I915_EXEC_RING_MASK;
2650
	else
2651
		idx = eb_select_legacy_ring(eb);
2652 2653 2654 2655 2656

	ce = i915_gem_context_get_engine(eb->gem_context, idx);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

M
Matthew Brost 已提交
2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670
	if (intel_context_is_parallel(ce)) {
		if (eb->buffer_count < ce->parallel.number_children + 1) {
			intel_context_put(ce);
			return -EINVAL;
		}
		if (eb->batch_start_offset || eb->args->batch_len) {
			intel_context_put(ce);
			return -EINVAL;
		}
	}
	eb->num_batches = ce->parallel.number_children + 1;

	for_each_child(ce, child)
		intel_context_get(child);
2671
	intel_gt_pm_get(ce->engine->gt);
2672

2673 2674 2675 2676 2677
	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
		err = intel_context_alloc_state(ce);
		if (err)
			goto err;
	}
M
Matthew Brost 已提交
2678 2679 2680 2681 2682 2683 2684
	for_each_child(ce, child) {
		if (!test_bit(CONTEXT_ALLOC_BIT, &child->flags)) {
			err = intel_context_alloc_state(child);
			if (err)
				goto err;
		}
	}
2685 2686 2687 2688 2689 2690 2691 2692 2693 2694

	/*
	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged.
	 */
	err = intel_gt_terminally_wedged(ce->engine->gt);
	if (err)
		goto err;

	eb->context = ce;
M
Matthew Brost 已提交
2695
	eb->gt = ce->engine->gt;
2696 2697 2698 2699 2700 2701 2702 2703 2704 2705

	/*
	 * Make sure engine pool stays alive even if we call intel_context_put
	 * during ww handling. The pool is destroyed when last pm reference
	 * is dropped, which breaks our -EDEADLK handling.
	 */
	return err;

err:
	intel_gt_pm_put(ce->engine->gt);
M
Matthew Brost 已提交
2706 2707
	for_each_child(ce, child)
		intel_context_put(child);
2708
	intel_context_put(ce);
2709
	return err;
2710 2711
}

2712 2713 2714
static void
eb_put_engine(struct i915_execbuffer *eb)
{
M
Matthew Brost 已提交
2715 2716 2717 2718 2719
	struct intel_context *child;

	intel_gt_pm_put(eb->gt);
	for_each_child(eb->context, child)
		intel_context_put(child);
2720 2721 2722
	intel_context_put(eb->context);
}

2723
static void
2724
__free_fence_array(struct eb_fence *fences, unsigned int n)
2725
{
2726
	while (n--) {
2727
		drm_syncobj_put(ptr_mask_bits(fences[n].syncobj, 2));
2728
		dma_fence_put(fences[n].dma_fence);
2729
		dma_fence_chain_free(fences[n].chain_fence);
2730
	}
2731 2732 2733
	kvfree(fences);
}

2734
static int
2735 2736
add_timeline_fence_array(struct i915_execbuffer *eb,
			 const struct drm_i915_gem_execbuffer_ext_timeline_fences *timeline_fences)
2737
{
2738 2739 2740 2741 2742
	struct drm_i915_gem_exec_fence __user *user_fences;
	u64 __user *user_values;
	struct eb_fence *f;
	u64 nfences;
	int err = 0;
2743

2744 2745
	nfences = timeline_fences->fence_count;
	if (!nfences)
2746
		return 0;
2747

2748 2749 2750
	/* Check multiplication overflow for access_ok() and kvmalloc_array() */
	BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
	if (nfences > min_t(unsigned long,
2751 2752
			    ULONG_MAX / sizeof(*user_fences),
			    SIZE_MAX / sizeof(*f)) - eb->num_fences)
2753
		return -EINVAL;
2754

2755 2756 2757 2758 2759 2760
	user_fences = u64_to_user_ptr(timeline_fences->handles_ptr);
	if (!access_ok(user_fences, nfences * sizeof(*user_fences)))
		return -EFAULT;

	user_values = u64_to_user_ptr(timeline_fences->values_ptr);
	if (!access_ok(user_values, nfences * sizeof(*user_values)))
2761
		return -EFAULT;
2762

2763 2764 2765 2766
	f = krealloc(eb->fences,
		     (eb->num_fences + nfences) * sizeof(*f),
		     __GFP_NOWARN | GFP_KERNEL);
	if (!f)
2767
		return -ENOMEM;
2768

2769 2770 2771 2772 2773 2774 2775 2776
	eb->fences = f;
	f += eb->num_fences;

	BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
		     ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);

	while (nfences--) {
		struct drm_i915_gem_exec_fence user_fence;
2777
		struct drm_syncobj *syncobj;
2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798
		struct dma_fence *fence = NULL;
		u64 point;

		if (__copy_from_user(&user_fence,
				     user_fences++,
				     sizeof(user_fence)))
			return -EFAULT;

		if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS)
			return -EINVAL;

		if (__get_user(point, user_values++))
			return -EFAULT;

		syncobj = drm_syncobj_find(eb->file, user_fence.handle);
		if (!syncobj) {
			DRM_DEBUG("Invalid syncobj handle provided\n");
			return -ENOENT;
		}

		fence = drm_syncobj_fence_get(syncobj);
2799

2800 2801 2802 2803 2804
		if (!fence && user_fence.flags &&
		    !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) {
			DRM_DEBUG("Syncobj handle has no fence\n");
			drm_syncobj_put(syncobj);
			return -EINVAL;
2805 2806
		}

2807 2808 2809 2810 2811
		if (fence)
			err = dma_fence_chain_find_seqno(&fence, point);

		if (err && !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) {
			DRM_DEBUG("Syncobj handle missing requested point %llu\n", point);
2812
			dma_fence_put(fence);
2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842
			drm_syncobj_put(syncobj);
			return err;
		}

		/*
		 * A point might have been signaled already and
		 * garbage collected from the timeline. In this case
		 * just ignore the point and carry on.
		 */
		if (!fence && !(user_fence.flags & I915_EXEC_FENCE_SIGNAL)) {
			drm_syncobj_put(syncobj);
			continue;
		}

		/*
		 * For timeline syncobjs we need to preallocate chains for
		 * later signaling.
		 */
		if (point != 0 && user_fence.flags & I915_EXEC_FENCE_SIGNAL) {
			/*
			 * Waiting and signaling the same point (when point !=
			 * 0) would break the timeline.
			 */
			if (user_fence.flags & I915_EXEC_FENCE_WAIT) {
				DRM_DEBUG("Trying to wait & signal the same timeline point.\n");
				dma_fence_put(fence);
				drm_syncobj_put(syncobj);
				return -EINVAL;
			}

2843
			f->chain_fence = dma_fence_chain_alloc();
2844 2845 2846 2847 2848 2849 2850
			if (!f->chain_fence) {
				drm_syncobj_put(syncobj);
				dma_fence_put(fence);
				return -ENOMEM;
			}
		} else {
			f->chain_fence = NULL;
2851 2852
		}

2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906
		f->syncobj = ptr_pack_bits(syncobj, user_fence.flags, 2);
		f->dma_fence = fence;
		f->value = point;
		f++;
		eb->num_fences++;
	}

	return 0;
}

static int add_fence_array(struct i915_execbuffer *eb)
{
	struct drm_i915_gem_execbuffer2 *args = eb->args;
	struct drm_i915_gem_exec_fence __user *user;
	unsigned long num_fences = args->num_cliprects;
	struct eb_fence *f;

	if (!(args->flags & I915_EXEC_FENCE_ARRAY))
		return 0;

	if (!num_fences)
		return 0;

	/* Check multiplication overflow for access_ok() and kvmalloc_array() */
	BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
	if (num_fences > min_t(unsigned long,
			       ULONG_MAX / sizeof(*user),
			       SIZE_MAX / sizeof(*f) - eb->num_fences))
		return -EINVAL;

	user = u64_to_user_ptr(args->cliprects_ptr);
	if (!access_ok(user, num_fences * sizeof(*user)))
		return -EFAULT;

	f = krealloc(eb->fences,
		     (eb->num_fences + num_fences) * sizeof(*f),
		     __GFP_NOWARN | GFP_KERNEL);
	if (!f)
		return -ENOMEM;

	eb->fences = f;
	f += eb->num_fences;
	while (num_fences--) {
		struct drm_i915_gem_exec_fence user_fence;
		struct drm_syncobj *syncobj;
		struct dma_fence *fence = NULL;

		if (__copy_from_user(&user_fence, user++, sizeof(user_fence)))
			return -EFAULT;

		if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS)
			return -EINVAL;

		syncobj = drm_syncobj_find(eb->file, user_fence.handle);
2907 2908
		if (!syncobj) {
			DRM_DEBUG("Invalid syncobj handle provided\n");
2909 2910 2911 2912 2913 2914 2915 2916 2917 2918
			return -ENOENT;
		}

		if (user_fence.flags & I915_EXEC_FENCE_WAIT) {
			fence = drm_syncobj_fence_get(syncobj);
			if (!fence) {
				DRM_DEBUG("Syncobj handle has no fence\n");
				drm_syncobj_put(syncobj);
				return -EINVAL;
			}
2919 2920
		}

2921 2922 2923
		BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
			     ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);

2924 2925 2926 2927 2928 2929
		f->syncobj = ptr_pack_bits(syncobj, user_fence.flags, 2);
		f->dma_fence = fence;
		f->value = 0;
		f->chain_fence = NULL;
		f++;
		eb->num_fences++;
2930 2931
	}

2932
	return 0;
2933
}
2934

2935 2936 2937 2938
static void put_fence_array(struct eb_fence *fences, int num_fences)
{
	if (fences)
		__free_fence_array(fences, num_fences);
2939 2940 2941
}

static int
M
Matthew Brost 已提交
2942 2943
await_fence_array(struct i915_execbuffer *eb,
		  struct i915_request *rq)
2944 2945 2946 2947
{
	unsigned int n;
	int err;

2948
	for (n = 0; n < eb->num_fences; n++) {
2949 2950 2951
		struct drm_syncobj *syncobj;
		unsigned int flags;

2952
		syncobj = ptr_unpack_bits(eb->fences[n].syncobj, &flags, 2);
2953

2954 2955
		if (!eb->fences[n].dma_fence)
			continue;
2956

M
Matthew Brost 已提交
2957
		err = i915_request_await_dma_fence(rq, eb->fences[n].dma_fence);
2958 2959 2960 2961 2962 2963 2964
		if (err < 0)
			return err;
	}

	return 0;
}

M
Matthew Brost 已提交
2965 2966
static void signal_fence_array(const struct i915_execbuffer *eb,
			       struct dma_fence * const fence)
2967 2968 2969
{
	unsigned int n;

2970
	for (n = 0; n < eb->num_fences; n++) {
2971 2972 2973
		struct drm_syncobj *syncobj;
		unsigned int flags;

2974
		syncobj = ptr_unpack_bits(eb->fences[n].syncobj, &flags, 2);
2975 2976 2977
		if (!(flags & I915_EXEC_FENCE_SIGNAL))
			continue;

2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990
		if (eb->fences[n].chain_fence) {
			drm_syncobj_add_point(syncobj,
					      eb->fences[n].chain_fence,
					      fence,
					      eb->fences[n].value);
			/*
			 * The chain's ownership is transferred to the
			 * timeline.
			 */
			eb->fences[n].chain_fence = NULL;
		} else {
			drm_syncobj_replace_fence(syncobj, fence);
		}
2991 2992 2993
	}
}

2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005
static int
parse_timeline_fences(struct i915_user_extension __user *ext, void *data)
{
	struct i915_execbuffer *eb = data;
	struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences;

	if (copy_from_user(&timeline_fences, ext, sizeof(timeline_fences)))
		return -EFAULT;

	return add_timeline_fence_array(eb, &timeline_fences);
}

3006 3007 3008 3009 3010 3011 3012 3013 3014
static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
{
	struct i915_request *rq, *rn;

	list_for_each_entry_safe(rq, rn, &tl->requests, link)
		if (rq == end || !i915_request_retire(rq))
			break;
}

M
Matthew Brost 已提交
3015 3016
static int eb_request_add(struct i915_execbuffer *eb, struct i915_request *rq,
			  int err, bool last_parallel)
3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029
{
	struct intel_timeline * const tl = i915_request_timeline(rq);
	struct i915_sched_attr attr = {};
	struct i915_request *prev;

	lockdep_assert_held(&tl->mutex);
	lockdep_unpin_lock(&tl->mutex, rq->cookie);

	trace_i915_request_add(rq);

	prev = __i915_request_commit(rq);

	/* Check that the context wasn't destroyed before submission */
3030
	if (likely(!intel_context_is_closed(eb->context))) {
3031 3032 3033
		attr = eb->gem_context->sched;
	} else {
		/* Serialise with context_close via the add_to_timeline */
3034 3035
		i915_request_set_error_once(rq, -ENOENT);
		__i915_request_skip(rq);
3036
		err = -ENOENT; /* override any transient errors */
3037 3038
	}

M
Matthew Brost 已提交
3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049
	if (intel_context_is_parallel(eb->context)) {
		if (err) {
			__i915_request_skip(rq);
			set_bit(I915_FENCE_FLAG_SKIP_PARALLEL,
				&rq->fence.flags);
		}
		if (last_parallel)
			set_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL,
				&rq->fence.flags);
	}

3050 3051 3052 3053 3054 3055 3056
	__i915_request_queue(rq, &attr);

	/* Try to clean up the client's timeline after submitting the request */
	if (prev)
		retire_requests(tl, prev);

	mutex_unlock(&tl->mutex);
3057 3058

	return err;
3059 3060
}

M
Matthew Brost 已提交
3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079
static int eb_requests_add(struct i915_execbuffer *eb, int err)
{
	int i;

	/*
	 * We iterate in reverse order of creation to release timeline mutexes in
	 * same order.
	 */
	for_each_batch_add_order(eb, i) {
		struct i915_request *rq = eb->requests[i];

		if (!rq)
			continue;
		err |= eb_request_add(eb, rq, err, i == 0);
	}

	return err;
}

3080
static const i915_user_extension_fn execbuf_extensions[] = {
3081
	[DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES] = parse_timeline_fences,
3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105
};

static int
parse_execbuf2_extensions(struct drm_i915_gem_execbuffer2 *args,
			  struct i915_execbuffer *eb)
{
	if (!(args->flags & I915_EXEC_USE_EXTENSIONS))
		return 0;

	/* The execbuf2 extension mechanism reuses cliprects_ptr. So we cannot
	 * have another flag also using it at the same time.
	 */
	if (eb->args->flags & I915_EXEC_FENCE_ARRAY)
		return -EINVAL;

	if (args->num_cliprects != 0)
		return -EINVAL;

	return i915_user_extensions(u64_to_user_ptr(args->cliprects_ptr),
				    execbuf_extensions,
				    ARRAY_SIZE(execbuf_extensions),
				    eb);
}

M
Matthew Brost 已提交
3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143
static void eb_requests_get(struct i915_execbuffer *eb)
{
	unsigned int i;

	for_each_batch_create_order(eb, i) {
		if (!eb->requests[i])
			break;

		i915_request_get(eb->requests[i]);
	}
}

static void eb_requests_put(struct i915_execbuffer *eb)
{
	unsigned int i;

	for_each_batch_create_order(eb, i) {
		if (!eb->requests[i])
			break;

		i915_request_put(eb->requests[i]);
	}
}

static struct sync_file *
eb_composite_fence_create(struct i915_execbuffer *eb, int out_fence_fd)
{
	struct sync_file *out_fence = NULL;
	struct dma_fence_array *fence_array;
	struct dma_fence **fences;
	unsigned int i;

	GEM_BUG_ON(!intel_context_is_parent(eb->context));

	fences = kmalloc_array(eb->num_batches, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return ERR_PTR(-ENOMEM);

3144
	for_each_batch_create_order(eb, i) {
M
Matthew Brost 已提交
3145
		fences[i] = &eb->requests[i]->fence;
3146 3147 3148
		__set_bit(I915_FENCE_FLAG_COMPOSITE,
			  &eb->requests[i]->fence.flags);
	}
M
Matthew Brost 已提交
3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249

	fence_array = dma_fence_array_create(eb->num_batches,
					     fences,
					     eb->context->parallel.fence_context,
					     eb->context->parallel.seqno,
					     false);
	if (!fence_array) {
		kfree(fences);
		return ERR_PTR(-ENOMEM);
	}

	/* Move ownership to the dma_fence_array created above */
	for_each_batch_create_order(eb, i)
		dma_fence_get(fences[i]);

	if (out_fence_fd != -1) {
		out_fence = sync_file_create(&fence_array->base);
		/* sync_file now owns fence_arry, drop creation ref */
		dma_fence_put(&fence_array->base);
		if (!out_fence)
			return ERR_PTR(-ENOMEM);
	}

	eb->composite_fence = &fence_array->base;

	return out_fence;
}

static struct sync_file *
eb_fences_add(struct i915_execbuffer *eb, struct i915_request *rq,
	      struct dma_fence *in_fence, int out_fence_fd)
{
	struct sync_file *out_fence = NULL;
	int err;

	if (unlikely(eb->gem_context->syncobj)) {
		struct dma_fence *fence;

		fence = drm_syncobj_fence_get(eb->gem_context->syncobj);
		err = i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
		if (err)
			return ERR_PTR(err);
	}

	if (in_fence) {
		if (eb->args->flags & I915_EXEC_FENCE_SUBMIT)
			err = i915_request_await_execution(rq, in_fence);
		else
			err = i915_request_await_dma_fence(rq, in_fence);
		if (err < 0)
			return ERR_PTR(err);
	}

	if (eb->fences) {
		err = await_fence_array(eb, rq);
		if (err)
			return ERR_PTR(err);
	}

	if (intel_context_is_parallel(eb->context)) {
		out_fence = eb_composite_fence_create(eb, out_fence_fd);
		if (IS_ERR(out_fence))
			return ERR_PTR(-ENOMEM);
	} else if (out_fence_fd != -1) {
		out_fence = sync_file_create(&rq->fence);
		if (!out_fence)
			return ERR_PTR(-ENOMEM);
	}

	return out_fence;
}

static struct intel_context *
eb_find_context(struct i915_execbuffer *eb, unsigned int context_number)
{
	struct intel_context *child;

	if (likely(context_number == 0))
		return eb->context;

	for_each_child(eb->context, child)
		if (!--context_number)
			return child;

	GEM_BUG_ON("Context not found");

	return NULL;
}

static struct sync_file *
eb_requests_create(struct i915_execbuffer *eb, struct dma_fence *in_fence,
		   int out_fence_fd)
{
	struct sync_file *out_fence = NULL;
	unsigned int i;

	for_each_batch_create_order(eb, i) {
		/* Allocate a request for this batch buffer nice and early. */
		eb->requests[i] = i915_request_create(eb_find_context(eb, i));
		if (IS_ERR(eb->requests[i])) {
3250
			out_fence = ERR_CAST(eb->requests[i]);
M
Matthew Brost 已提交
3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267
			eb->requests[i] = NULL;
			return out_fence;
		}

		/*
		 * Only the first request added (committed to backend) has to
		 * take the in fences into account as all subsequent requests
		 * will have fences inserted inbetween them.
		 */
		if (i + 1 == eb->num_batches) {
			out_fence = eb_fences_add(eb, eb->requests[i],
						  in_fence, out_fence_fd);
			if (IS_ERR(out_fence))
				return out_fence;
		}

		/*
3268 3269 3270
		 * Not really on stack, but we don't want to call
		 * kfree on the batch_snapshot when we put it, so use the
		 * _onstack interface.
M
Matthew Brost 已提交
3271
		 */
3272 3273 3274 3275
		if (eb->batches[i]->vma)
			i915_vma_snapshot_init_onstack(&eb->requests[i]->batch_snapshot,
						       eb->batches[i]->vma,
						       "batch");
M
Matthew Brost 已提交
3276 3277 3278 3279 3280 3281 3282 3283 3284 3285
		if (eb->batch_pool) {
			GEM_BUG_ON(intel_context_is_parallel(eb->context));
			intel_gt_buffer_pool_mark_active(eb->batch_pool,
							 eb->requests[i]);
		}
	}

	return out_fence;
}

3286
static int
3287
i915_gem_do_execbuffer(struct drm_device *dev,
3288 3289
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
3290
		       struct drm_i915_gem_exec_object2 *exec)
3291
{
3292
	struct drm_i915_private *i915 = to_i915(dev);
3293
	struct i915_execbuffer eb;
3294 3295 3296
	struct dma_fence *in_fence = NULL;
	struct sync_file *out_fence = NULL;
	int out_fence_fd = -1;
3297
	int err;
3298

3299
	BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS);
3300 3301
	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);
3302

3303
	eb.i915 = i915;
3304 3305
	eb.file = file;
	eb.args = args;
3306
	if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
3307
		args->flags |= __EXEC_HAS_RELOC;
3308

3309
	eb.exec = exec;
3310 3311
	eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
	eb.vma[0].vma = NULL;
D
Daniel Vetter 已提交
3312
	eb.batch_pool = NULL;
3313

3314
	eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
3315 3316
	reloc_cache_init(&eb.reloc_cache, eb.i915);

3317
	eb.buffer_count = args->buffer_count;
3318
	eb.batch_start_offset = args->batch_start_offset;
3319
	eb.trampoline = NULL;
3320

3321
	eb.fences = NULL;
3322
	eb.num_fences = 0;
3323

3324 3325
	eb_capture_list_clear(&eb);

M
Matthew Brost 已提交
3326 3327 3328 3329
	memset(eb.requests, 0, sizeof(struct i915_request *) *
	       ARRAY_SIZE(eb.requests));
	eb.composite_fence = NULL;

3330
	eb.batch_flags = 0;
3331
	if (args->flags & I915_EXEC_SECURE) {
3332
		if (GRAPHICS_VER(i915) >= 11)
3333 3334 3335 3336 3337 3338
			return -ENODEV;

		/* Return -EPERM to trigger fallback code on old binaries. */
		if (!HAS_SECURE_BATCHES(i915))
			return -EPERM;

3339
		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
3340
			return -EPERM;
3341

3342
		eb.batch_flags |= I915_DISPATCH_SECURE;
3343
	}
3344
	if (args->flags & I915_EXEC_IS_PINNED)
3345
		eb.batch_flags |= I915_DISPATCH_PINNED;
3346

3347 3348 3349 3350 3351 3352 3353 3354
	err = parse_execbuf2_extensions(args, &eb);
	if (err)
		goto err_ext;

	err = add_fence_array(&eb);
	if (err)
		goto err_ext;

3355 3356 3357 3358 3359
#define IN_FENCES (I915_EXEC_FENCE_IN | I915_EXEC_FENCE_SUBMIT)
	if (args->flags & IN_FENCES) {
		if ((args->flags & IN_FENCES) == IN_FENCES)
			return -EINVAL;

3360
		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
3361 3362 3363 3364
		if (!in_fence) {
			err = -EINVAL;
			goto err_ext;
		}
3365
	}
3366
#undef IN_FENCES
3367

3368 3369 3370
	if (args->flags & I915_EXEC_FENCE_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
3371
			err = out_fence_fd;
3372
			goto err_in_fence;
3373 3374 3375
		}
	}

3376 3377
	err = eb_create(&eb);
	if (err)
3378
		goto err_out_fence;
3379

3380
	GEM_BUG_ON(!eb.lut_size);
3381

3382 3383 3384 3385
	err = eb_select_context(&eb);
	if (unlikely(err))
		goto err_destroy;

3386
	err = eb_select_engine(&eb);
3387
	if (unlikely(err))
3388
		goto err_context;
3389

3390 3391
	err = eb_lookup_vmas(&eb);
	if (err) {
3392
		eb_release_vmas(&eb, true);
3393 3394 3395 3396 3397
		goto err_engine;
	}

	i915_gem_ww_ctx_init(&eb.ww, true);

3398
	err = eb_relocate_parse(&eb);
3399
	if (err) {
3400 3401 3402 3403 3404 3405 3406 3407 3408
		/*
		 * If the user expects the execobject.offset and
		 * reloc.presumed_offset to be an exact match,
		 * as for using NO_RELOC, then we cannot update
		 * the execobject.offset until we have completed
		 * relocation.
		 */
		args->flags &= ~__EXEC_HAS_RELOC;
		goto err_vma;
3409
	}
3410

3411
	ww_acquire_done(&eb.ww.ctx);
3412
	eb_capture_stage(&eb);
3413

M
Matthew Brost 已提交
3414 3415 3416
	out_fence = eb_requests_create(&eb, in_fence, out_fence_fd);
	if (IS_ERR(out_fence)) {
		err = PTR_ERR(out_fence);
3417
		out_fence = NULL;
M
Matthew Brost 已提交
3418
		if (eb.requests[0])
3419
			goto err_request;
M
Matthew Brost 已提交
3420 3421
		else
			goto err_vma;
3422 3423
	}

M
Matthew Brost 已提交
3424
	err = eb_submit(&eb);
3425

3426
err_request:
M
Matthew Brost 已提交
3427 3428
	eb_requests_get(&eb);
	err = eb_requests_add(&eb, err);
3429

3430
	if (eb.fences)
M
Matthew Brost 已提交
3431 3432 3433
		signal_fence_array(&eb, eb.composite_fence ?
				   eb.composite_fence :
				   &eb.requests[0]->fence);
3434

3435
	if (out_fence) {
3436
		if (err == 0) {
3437
			fd_install(out_fence_fd, out_fence->file);
3438
			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
3439 3440 3441 3442 3443 3444
			args->rsvd2 |= (u64)out_fence_fd << 32;
			out_fence_fd = -1;
		} else {
			fput(out_fence->file);
		}
	}
3445 3446 3447

	if (unlikely(eb.gem_context->syncobj)) {
		drm_syncobj_replace_fence(eb.gem_context->syncobj,
M
Matthew Brost 已提交
3448 3449 3450
					  eb.composite_fence ?
					  eb.composite_fence :
					  &eb.requests[0]->fence);
3451 3452
	}

M
Matthew Brost 已提交
3453 3454 3455 3456
	if (!out_fence && eb.composite_fence)
		dma_fence_put(eb.composite_fence);

	eb_requests_put(&eb);
3457

3458
err_vma:
3459
	eb_release_vmas(&eb, true);
3460 3461
	if (eb.trampoline)
		i915_vma_unpin(eb.trampoline);
3462 3463 3464 3465 3466 3467
	WARN_ON(err == -EDEADLK);
	i915_gem_ww_ctx_fini(&eb.ww);

	if (eb.batch_pool)
		intel_gt_buffer_pool_put(eb.batch_pool);
err_engine:
3468
	eb_put_engine(&eb);
3469
err_context:
3470
	i915_gem_context_put(eb.gem_context);
3471
err_destroy:
3472
	eb_destroy(&eb);
3473
err_out_fence:
3474 3475
	if (out_fence_fd != -1)
		put_unused_fd(out_fence_fd);
3476
err_in_fence:
3477
	dma_fence_put(in_fence);
3478 3479
err_ext:
	put_fence_array(eb.fences, eb.num_fences);
3480
	return err;
3481 3482
}

3483 3484
static size_t eb_element_size(void)
{
3485
	return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma);
3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500
}

static bool check_buffer_count(size_t count)
{
	const size_t sz = eb_element_size();

	/*
	 * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup
	 * array size (see eb_create()). Otherwise, we can accept an array as
	 * large as can be addressed (though use large arrays at your peril)!
	 */

	return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1);
}

3501
int
3502 3503
i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
3504
{
3505
	struct drm_i915_private *i915 = to_i915(dev);
3506
	struct drm_i915_gem_execbuffer2 *args = data;
3507
	struct drm_i915_gem_exec_object2 *exec2_list;
3508
	const size_t count = args->buffer_count;
3509
	int err;
3510

3511
	if (!check_buffer_count(count)) {
3512
		drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
3513 3514 3515
		return -EINVAL;
	}

T
Tvrtko Ursulin 已提交
3516 3517 3518
	err = i915_gem_check_execbuffer(args);
	if (err)
		return err;
3519

3520 3521
	/* Allocate extra slots for use by the command parser */
	exec2_list = kvmalloc_array(count + 2, eb_element_size(),
3522
				    __GFP_NOWARN | GFP_KERNEL);
3523
	if (exec2_list == NULL) {
3524 3525
		drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n",
			count);
3526 3527
		return -ENOMEM;
	}
3528 3529
	if (copy_from_user(exec2_list,
			   u64_to_user_ptr(args->buffers_ptr),
3530
			   sizeof(*exec2_list) * count)) {
3531
		drm_dbg(&i915->drm, "copy %zd exec entries failed\n", count);
M
Michal Hocko 已提交
3532
		kvfree(exec2_list);
3533 3534 3535
		return -EFAULT;
	}

3536
	err = i915_gem_do_execbuffer(dev, file, args, exec2_list);
3537 3538 3539 3540 3541 3542 3543 3544

	/*
	 * Now that we have begun execution of the batchbuffer, we ignore
	 * any new error after this point. Also given that we have already
	 * updated the associated relocations, we try to write out the current
	 * object locations irrespective of any error.
	 */
	if (args->flags & __EXEC_HAS_RELOC) {
3545
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
3546 3547
			u64_to_user_ptr(args->buffers_ptr);
		unsigned int i;
3548

3549
		/* Copy the new buffer offsets back to the user's exec list. */
3550 3551 3552 3553 3554 3555 3556
		/*
		 * Note: count * sizeof(*user_exec_list) does not overflow,
		 * because we checked 'count' in check_buffer_count().
		 *
		 * And this range already got effectively checked earlier
		 * when we did the "copy_from_user()" above.
		 */
3557 3558
		if (!user_write_access_begin(user_exec_list,
					     count * sizeof(*user_exec_list)))
3559
			goto end;
3560

3561
		for (i = 0; i < args->buffer_count; i++) {
3562 3563 3564
			if (!(exec2_list[i].offset & UPDATE))
				continue;

3565
			exec2_list[i].offset =
3566 3567 3568 3569
				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
			unsafe_put_user(exec2_list[i].offset,
					&user_exec_list[i].offset,
					end_user);
3570
		}
3571
end_user:
3572
		user_write_access_end();
3573
end:;
3574 3575
	}

3576
	args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
M
Michal Hocko 已提交
3577
	kvfree(exec2_list);
3578
	return err;
3579
}