/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008,2010 Intel Corporation
 */

#include <linux/intel-iommu.h>
#include <linux/dma-resv.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_syncobj.h>

#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pool.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_ioctls.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"

enum {
	FORCE_CPU_RELOC = 1,
	FORCE_GTT_RELOC,
	FORCE_GPU_RELOC,
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
};

#define __EXEC_OBJECT_HAS_REF		BIT(31)
#define __EXEC_OBJECT_HAS_PIN		BIT(30)
#define __EXEC_OBJECT_HAS_FENCE		BIT(29)
#define __EXEC_OBJECT_NEEDS_MAP		BIT(28)
#define __EXEC_OBJECT_NEEDS_BIAS	BIT(27)
#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 27) /* all of the above */
#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)

#define __EXEC_HAS_RELOC	BIT(31)
#define __EXEC_VALIDATED	BIT(30)
#define __EXEC_INTERNAL_FLAGS	(~0u << 30)
#define UPDATE			PIN_OFFSET_FIXED

#define BATCH_OFFSET_BIAS (256*1024)

#define __I915_EXEC_ILLEGAL_FLAGS \
	(__I915_EXEC_UNKNOWN_FLAGS | \
	 I915_EXEC_CONSTANTS_MASK  | \
	 I915_EXEC_RESOURCE_STREAMER)

/* Catch emission of unexpected errors for CI! */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#undef EINVAL
#define EINVAL ({ \
	DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
	22; \
})
#endif

/**
 * DOC: User command execution
 *
 * Userspace submits commands to be executed on the GPU as an instruction
 * stream within a GEM object we call a batchbuffer. These instructions may
 * refer to other GEM objects containing auxiliary state such as kernels,
 * samplers, render targets and even secondary batchbuffers. Userspace does
 * not know where in the GPU memory these objects reside and so before the
 * batchbuffer is passed to the GPU for execution, those addresses in the
 * batchbuffer and auxiliary objects are updated. This is known as relocation,
 * or patching. To try and avoid having to relocate each object on the next
 * execution, userspace is told the location of those objects in this pass,
 * but this remains just a hint as the kernel may choose a new location for
 * any object in the future.
 *
 * At the level of talking to the hardware, submitting a batchbuffer for the
 * GPU to execute is to add content to a buffer from which the HW
 * command streamer is reading.
 *
 * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e.
 *    Execlists, this command is not placed on the same buffer as the
 *    remaining items.
 *
 * 2. Add a command to invalidate caches to the buffer.
 *
 * 3. Add a batchbuffer start command to the buffer; the start command is
 *    essentially a token together with the GPU address of the batchbuffer
 *    to be executed.
 *
 * 4. Add a pipeline flush to the buffer.
 *
 * 5. Add a memory write command to the buffer to record when the GPU
 *    is done executing the batchbuffer. The memory write writes the
 *    global sequence number of the request, ``i915_request::global_seqno``;
 *    the i915 driver uses the current value in the register to determine
 *    if the GPU has completed the batchbuffer.
 *
 * 6. Add a user interrupt command to the buffer. This command instructs
 *    the GPU to issue an interrupt when the command, pipeline flush and
 *    memory write are completed.
 *
 * 7. Inform the hardware of the additional commands added to the buffer
 *    (by updating the tail pointer).
 *
 * Processing an execbuf ioctl is conceptually split up into a few phases.
 *
 * 1. Validation - Ensure all the pointers, handles and flags are valid.
 * 2. Reservation - Assign GPU address space for every object
 * 3. Relocation - Update any addresses to point to the final locations
 * 4. Serialisation - Order the request with respect to its dependencies
 * 5. Construction - Construct a request to execute the batchbuffer
 * 6. Submission (at some point in the future execution)
 *
 * Reserving resources for the execbuf is the most complicated phase. We
 * neither want to have to migrate the object in the address space, nor do
 * we want to have to update any relocations pointing to this object. Ideally,
 * we want to leave the object where it is and for all the existing relocations
 * to match. If the object is given a new address, or if userspace thinks the
 * object is elsewhere, we have to parse all the relocation entries and update
 * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
 * all the target addresses in all of its objects match the value in the
 * relocation entries and that they all match the presumed offsets given by the
 * list of execbuffer objects. Using this knowledge, we know that if we haven't
 * moved any buffers, all the relocation entries are valid and we can skip
 * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
 * hang.) The requirements for using I915_EXEC_NO_RELOC are:
 *
 *      The addresses written in the objects must match the corresponding
 *      reloc.presumed_offset which in turn must match the corresponding
 *      execobject.offset.
 *
 *      Any render targets written to in the batch must be flagged with
 *      EXEC_OBJECT_WRITE.
 *
 *      To avoid stalling, execobject.offset should match the current
 *      address of that object within the active context.
 *
 * The reservation is done in multiple phases. First we try and keep any
 * object already bound in its current location - so long as it meets the
 * constraints imposed by the new execbuffer. Any object left unbound after the
 * first pass is then fitted into any available idle space. If an object does
 * not fit, all objects are removed from the reservation and the process rerun
 * after sorting the objects into a priority order (more difficult to fit
 * objects are tried first). Failing that, the entire VM is cleared and we try
 * to fit the execbuf one last time before concluding that it simply will not
 * fit.
 *
 * A small complication to all of this is that we allow userspace not only to
 * specify an alignment and a size for the object in the address space, but
 * we also allow userspace to specify the exact offset. These objects are
 * simpler to place (the location is known a priori); all we have to do is make
 * sure the space is available.
 *
 * Once all the objects are in place, patching up the buried pointers to point
 * to the final locations is a fairly simple job of walking over the relocation
 * entry arrays, looking up the right address and rewriting the value into
 * the object. Simple! ... The relocation entries are stored in user memory
 * and so to access them we have to copy them into a local buffer. That copy
 * has to avoid taking any pagefaults as they may lead back to a GEM object
 * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
 * the relocation into multiple passes. First we try to do everything within an
 * atomic context (avoid the pagefaults) which requires that we never wait. If
 * we detect that we may wait, or if we need to fault, then we have to fallback
 * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
 * bells yet?) Dropping the mutex means that we lose all the state we have
 * built up so far for the execbuf and we must reset any global data. However,
 * we do leave the objects pinned in their final locations - which is a
 * potential issue for concurrent execbufs. Once we have left the mutex, we can
 * allocate and copy all the relocation entries into a large array at our
 * leisure, reacquire the mutex, reclaim all the objects and other state and
 * then proceed to update any incorrect addresses with the objects.
 *
 * As we process the relocation entries, we maintain a record of whether the
 * object is being written to. Using NO_RELOC, we expect userspace to provide
 * this information instead. We also check whether we can skip the relocation
 * by comparing the expected value inside the relocation entry with the target's
 * final address. If they differ, we have to map the current object and rewrite
 * the 4 or 8 byte pointer within.
 *
 * Serialising an execbuf is quite simple according to the rules of the GEM
 * ABI. Execution within each context is ordered by the order of submission.
 * Writes to any GEM object are in order of submission and are exclusive. Reads
 * from a GEM object are unordered with respect to other reads, but ordered by
 * writes. A write submitted after a read cannot occur before the read, and
 * similarly any read submitted after a write cannot occur before the write.
 * Writes are ordered between engines such that only one write occurs at any
 * time (completing any reads beforehand) - using semaphores where available
 * and CPU serialisation otherwise. Other GEM accesses obey the same rules, any
 * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
 * reads before starting, and any read (either using set-domain or pread) must
 * flush all GPU writes before starting. (Note we only employ a barrier before;
 * we currently rely on userspace not concurrently starting a new execution
 * whilst reading or writing to an object. This may be an advantage or not
 * depending on how much you trust userspace not to shoot themselves in the
 * foot.) Serialisation may just result in the request being inserted into
 * a DAG awaiting its turn, but most simple is to wait on the CPU until
 * all dependencies are resolved.
 *
 * After all of that, it is just a matter of closing the request and handing it to
 * the hardware (well, leaving it in a queue to be executed). However, we also
 * offer the ability for batchbuffers to be run with elevated privileges so
 * that they access otherwise hidden registers. (Used to adjust L3 cache etc.)
 * Before any batch is given extra privileges we first must check that it
 * contains no nefarious instructions: we check that each instruction is from
 * our whitelist and all registers are also from an allowed list. We first
 * copy the user's batchbuffer to a shadow (so that the user doesn't have
 * access to it, either by the CPU or GPU as we scan it) and then parse each
 * instruction. If everything is ok, we set a flag telling the hardware to run
 * the batchbuffer in trusted mode, otherwise the ioctl is rejected.
 */

struct i915_execbuffer {
	struct drm_i915_private *i915; /** i915 backpointer */
	struct drm_file *file; /** per-file lookup tables and limits */
	struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
	struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
	struct i915_vma **vma;
	unsigned int *flags;

	struct intel_engine_cs *engine; /** engine to queue the request to */
	struct intel_context *context; /* logical state for the request */
	struct i915_gem_context *gem_context; /** caller's context */

	struct i915_request *request; /** our request to build */
	struct i915_vma *batch; /** identity of the batch obj/vma */
	struct i915_vma *trampoline; /** trampoline used for chaining */

	/** actual size of execobj[] as we may extend it for the cmdparser */
	unsigned int buffer_count;

	/** list of vma not yet bound during reservation phase */
	struct list_head unbound;

	/** list of vma that have execobj.relocation_count */
	struct list_head relocs;

	/**
	 * Track the most recently used object for relocations, as we
	 * frequently have to perform multiple relocations within the same
	 * obj/page
	 */
	struct reloc_cache {
		struct drm_mm_node node; /** temporary GTT binding */
		unsigned long vaddr; /** Current kmap address */
		unsigned long page; /** Currently mapped page index */
		unsigned int gen; /** Cached value of INTEL_GEN */
		bool use_64bit_reloc : 1;
		bool has_llc : 1;
		bool has_fence : 1;
		bool needs_unfenced : 1;

		struct i915_request *rq;
		u32 *rq_cmd;
		unsigned int rq_size;
	} reloc_cache;

	u64 invalid_flags; /** Set of execobj.flags that are invalid */
	u32 context_flags; /** Set of execobj.flags to insert from the ctx */

	u32 batch_start_offset; /** Location within object of batch */
	u32 batch_len; /** Length of batch within object */
	u32 batch_flags; /** Flags composed for emit_bb_start() */

	/**
	 * Indicate either the size of the hashtable used to resolve
	 * relocation handles, or if negative that we are using a direct
	 * index into the execobj[].
	 */
	int lut_size;
	struct hlist_head *buckets; /** ht for relocation handles */
};

#define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags])

static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
	return intel_engine_requires_cmd_parser(eb->engine) ||
		(intel_engine_using_cmd_parser(eb->engine) &&
		 eb->args->batch_len);
}

static int eb_create(struct i915_execbuffer *eb)
{
	if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
		unsigned int size = 1 + ilog2(eb->buffer_count);

		/*
		 * Without a 1:1 association between relocation handles and
		 * the execobject[] index, we instead create a hashtable.
		 * We size it dynamically based on available memory, starting
		 * first with 1:1 associative hash and scaling back until
		 * the allocation succeeds.
		 *
		 * Later on we use a positive lut_size to indicate we are
		 * using this hashtable, and a negative value to indicate a
		 * direct lookup.
		 */
		do {
			gfp_t flags;

			/* While we can still reduce the allocation size, don't
			 * raise a warning and allow the allocation to fail.
			 * On the last pass though, we want to try as hard
			 * as possible to perform the allocation and warn
			 * if it fails.
			 */
			flags = GFP_KERNEL;
			if (size > 1)
				flags |= __GFP_NORETRY | __GFP_NOWARN;

			eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
					      flags);
			if (eb->buckets)
				break;
		} while (--size);

		if (unlikely(!size))
			return -ENOMEM;

		eb->lut_size = size;
	} else {
		eb->lut_size = -eb->buffer_count;
	}

	return 0;
}

static bool
eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
		 const struct i915_vma *vma,
		 unsigned int flags)
{
	if (vma->node.size < entry->pad_to_size)
		return true;

	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
		return true;

	if (flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	if (flags & __EXEC_OBJECT_NEEDS_MAP &&
	    !i915_vma_is_map_and_fenceable(vma))
		return true;

	return false;
}

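/*
 * Attempt to cheaply pin the vma in place: reuse its current node (or the
 * offset userspace requested for a pinned object) without waiting or
 * evicting, taking a fence register if the execobject needs one. Returns
 * false if the vma could not be pinned there, or no longer satisfies the
 * execobject constraints, so the caller can fall back to a full reservation.
 */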
static inline bool
eb_pin_vma(struct i915_execbuffer *eb,
	   const struct drm_i915_gem_exec_object2 *entry,
	   struct i915_vma *vma)
{
	unsigned int exec_flags = *vma->exec_flags;
	u64 pin_flags;

	if (vma->node.size)
		pin_flags = vma->node.start;
	else
		pin_flags = entry->offset & PIN_OFFSET_MASK;

	pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_GTT))
		pin_flags |= PIN_GLOBAL;

	if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
		return false;

	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
		if (unlikely(i915_vma_pin_fence(vma))) {
			i915_vma_unpin(vma);
			return false;
		}

		if (vma->fence)
			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
	return !eb_vma_misplaced(entry, vma, exec_flags);
}

static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
{
	GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));

	if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
		__i915_vma_unpin_fence(vma);

	__i915_vma_unpin(vma);
}

static inline void
eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags)
{
	if (!(*flags & __EXEC_OBJECT_HAS_PIN))
		return;

	__eb_unreserve_vma(vma, *flags);
	*flags &= ~__EXEC_OBJECT_RESERVED;
}

static int
eb_validate_vma(struct i915_execbuffer *eb,
		struct drm_i915_gem_exec_object2 *entry,
		struct i915_vma *vma)
{
	struct drm_i915_private *i915 = eb->i915;

	if (unlikely(entry->flags & eb->invalid_flags))
		return -EINVAL;

	if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
		return -EINVAL;

	/*
	 * Offset can be used as input (EXEC_OBJECT_PINNED), reject
	 * any non-page-aligned or non-canonical addresses.
	 */
	if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
		     entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
		return -EINVAL;

	/* pad_to_size was once a reserved field, so sanitize it */
	if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
		if (unlikely(offset_in_page(entry->pad_to_size)))
			return -EINVAL;
	} else {
		entry->pad_to_size = 0;
	}

	if (unlikely(vma->exec_flags)) {
		drm_dbg(&i915->drm,
			"Object [handle %d, index %d] appears more than once in object list\n",
			entry->handle, (int)(entry - eb->exec));
		return -EINVAL;
	}

	/*
	 * From drm_mm perspective address space is continuous,
	 * so from this point we're always using non-canonical
	 * form internally.
	 */
	entry->offset = gen8_noncanonical_addr(entry->offset);

	if (!eb->reloc_cache.has_fence) {
		entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
	} else {
		if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
		     eb->reloc_cache.needs_unfenced) &&
		    i915_gem_object_is_tiled(vma->obj))
			entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
	}

	if (!(entry->flags & EXEC_OBJECT_PINNED))
		entry->flags |= eb->context_flags;

	return 0;
}

static int
eb_add_vma(struct i915_execbuffer *eb,
	   unsigned int i, unsigned batch_idx,
	   struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
	int err;

	GEM_BUG_ON(i915_vma_is_closed(vma));

	if (!(eb->args->flags & __EXEC_VALIDATED)) {
		err = eb_validate_vma(eb, entry, vma);
		if (unlikely(err))
			return err;
	}

	if (eb->lut_size > 0) {
		vma->exec_handle = entry->handle;
		hlist_add_head(&vma->exec_node,
			       &eb->buckets[hash_32(entry->handle,
						    eb->lut_size)]);
	}

	if (entry->relocation_count)
		list_add_tail(&vma->reloc_link, &eb->relocs);

	/*
	 * Stash a pointer from the vma to execobj, so we can query its flags,
	 * size, alignment etc as provided by the user. Also we stash a pointer
	 * to the vma inside the execobj so that we can use a direct lookup
	 * to find the right target VMA when doing relocations.
	 */
	eb->vma[i] = vma;
	eb->flags[i] = entry->flags;
	vma->exec_flags = &eb->flags[i];

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if (i == batch_idx) {
		if (entry->relocation_count &&
		    !(eb->flags[i] & EXEC_OBJECT_PINNED))
			eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
		if (eb->reloc_cache.has_fence)
			eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;

		eb->batch = vma;
	}

	err = 0;
	if (eb_pin_vma(eb, entry, vma)) {
		if (entry->offset != vma->node.start) {
			entry->offset = vma->node.start | UPDATE;
			eb->args->flags |= __EXEC_HAS_RELOC;
		}
	} else {
		eb_unreserve_vma(vma, vma->exec_flags);

		list_add_tail(&vma->exec_link, &eb->unbound);
		if (drm_mm_node_allocated(&vma->node))
			err = i915_vma_unbind(vma);
		if (unlikely(err))
			vma->exec_flags = NULL;
	}
	return err;
}

static inline int use_cpu_reloc(const struct reloc_cache *cache,
				const struct drm_i915_gem_object *obj)
{
	if (!i915_gem_object_has_struct_page(obj))
		return false;

	if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
		return true;

	if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
		return false;

	return (cache->has_llc ||
		obj->cache_dirty ||
		obj->cache_level != I915_CACHE_NONE);
}

static int eb_reserve_vma(const struct i915_execbuffer *eb,
			  struct i915_vma *vma,
			  u64 pin_flags)
{
	struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
	unsigned int exec_flags = *vma->exec_flags;
	int err;

	if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
		pin_flags |= PIN_GLOBAL;

	/*
	 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
	 * limit address to the first 4GBs for unflagged objects.
	 */
	if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
		pin_flags |= PIN_ZONE_4G;

	if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
		pin_flags |= PIN_MAPPABLE;

	if (exec_flags & EXEC_OBJECT_PINNED)
		pin_flags |= entry->offset | PIN_OFFSET_FIXED;
	else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
		pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;

	err = i915_vma_pin(vma,
			   entry->pad_to_size, entry->alignment,
			   pin_flags);
	if (err)
		return err;

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start | UPDATE;
		eb->args->flags |= __EXEC_HAS_RELOC;
	}

	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
		err = i915_vma_pin_fence(vma);
		if (unlikely(err)) {
			i915_vma_unpin(vma);
			return err;
		}

		if (vma->fence)
			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
	GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags));

	return 0;
}

static int eb_reserve(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int pin_flags = PIN_USER | PIN_NONBLOCK;
	struct list_head last;
	struct i915_vma *vma;
	unsigned int i, pass;
	int err;

	/*
	 * Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */

	pass = 0;
	err = 0;
	do {
		list_for_each_entry(vma, &eb->unbound, exec_link) {
			err = eb_reserve_vma(eb, vma, pin_flags);
			if (err)
				break;
		}
		if (err != -ENOSPC)
			return err;

		/* Resort *all* the objects into priority order */
		INIT_LIST_HEAD(&eb->unbound);
		INIT_LIST_HEAD(&last);
		for (i = 0; i < count; i++) {
			unsigned int flags = eb->flags[i];
			struct i915_vma *vma = eb->vma[i];

			if (flags & EXEC_OBJECT_PINNED &&
			    flags & __EXEC_OBJECT_HAS_PIN)
				continue;

			eb_unreserve_vma(vma, &eb->flags[i]);

			if (flags & EXEC_OBJECT_PINNED)
				/* Pinned must have their slot */
				list_add(&vma->exec_link, &eb->unbound);
			else if (flags & __EXEC_OBJECT_NEEDS_MAP)
				/* Mappable objects require the lowest 256MiB (aperture) */
				list_add_tail(&vma->exec_link, &eb->unbound);
			else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
				/* Prioritise 4GiB region for restricted bo */
				list_add(&vma->exec_link, &last);
			else
				list_add_tail(&vma->exec_link, &last);
		}
		list_splice_tail(&last, &eb->unbound);

		switch (pass++) {
		case 0:
			break;

		case 1:
			/* Too fragmented, unbind everything and retry */
			mutex_lock(&eb->context->vm->mutex);
			err = i915_gem_evict_vm(eb->context->vm);
			mutex_unlock(&eb->context->vm->mutex);
			if (err)
				return err;
			break;

		default:
			return -ENOSPC;
		}

		pin_flags = PIN_USER;
	} while (1);
}

static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
{
	if (eb->args->flags & I915_EXEC_BATCH_FIRST)
		return 0;
	else
		return eb->buffer_count - 1;
}

static int eb_select_context(struct i915_execbuffer *eb)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
	if (unlikely(!ctx))
		return -ENOENT;

	eb->gem_context = ctx;
	if (rcu_access_pointer(ctx->vm))
		eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;

	eb->context_flags = 0;
	if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
		eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return 0;
}

static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
	struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
	struct drm_i915_gem_object *obj;
	unsigned int i, batch;
	int err;

	INIT_LIST_HEAD(&eb->relocs);
	INIT_LIST_HEAD(&eb->unbound);

	batch = eb_batch_index(eb);

	mutex_lock(&eb->gem_context->mutex);
	if (unlikely(i915_gem_context_is_closed(eb->gem_context))) {
		err = -ENOENT;
		goto err_ctx;
	}

	for (i = 0; i < eb->buffer_count; i++) {
		u32 handle = eb->exec[i].handle;
		struct i915_lut_handle *lut;
		struct i915_vma *vma;

		vma = radix_tree_lookup(handles_vma, handle);
		if (likely(vma))
			goto add_vma;

		obj = i915_gem_object_lookup(eb->file, handle);
		if (unlikely(!obj)) {
			err = -ENOENT;
			goto err_vma;
		}

		vma = i915_vma_instance(obj, eb->context->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_obj;
		}

		lut = i915_lut_handle_alloc();
		if (unlikely(!lut)) {
			err = -ENOMEM;
			goto err_obj;
		}

		err = radix_tree_insert(handles_vma, handle, vma);
		if (unlikely(err)) {
			i915_lut_handle_free(lut);
			goto err_obj;
		}

		/* transfer ref to lut */
		if (!atomic_fetch_inc(&vma->open_count))
			i915_vma_reopen(vma);
		lut->handle = handle;
		lut->ctx = eb->gem_context;

		i915_gem_object_lock(obj);
		list_add(&lut->obj_link, &obj->lut_list);
		i915_gem_object_unlock(obj);

add_vma:
		err = eb_add_vma(eb, i, batch, vma);
		if (unlikely(err))
			goto err_vma;

		GEM_BUG_ON(vma != eb->vma[i]);
		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
		GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
			   eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
	}

	mutex_unlock(&eb->gem_context->mutex);

	eb->args->flags |= __EXEC_VALIDATED;
	return eb_reserve(eb);

err_obj:
	i915_gem_object_put(obj);
err_vma:
	eb->vma[i] = NULL;
err_ctx:
	mutex_unlock(&eb->gem_context->mutex);
	return err;
}

static struct i915_vma *
eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
{
	if (eb->lut_size < 0) {
		if (handle >= -eb->lut_size)
			return NULL;
		return eb->vma[handle];
	} else {
		struct hlist_head *head;
		struct i915_vma *vma;

		head = &eb->buckets[hash_32(handle, eb->lut_size)];
		hlist_for_each_entry(vma, head, exec_node) {
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void eb_release_vmas(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;

	for (i = 0; i < count; i++) {
		struct i915_vma *vma = eb->vma[i];
		unsigned int flags = eb->flags[i];

		if (!vma)
			break;

		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
		vma->exec_flags = NULL;
		eb->vma[i] = NULL;

		if (flags & __EXEC_OBJECT_HAS_PIN)
			__eb_unreserve_vma(vma, flags);

		if (flags & __EXEC_OBJECT_HAS_REF)
			i915_vma_put(vma);
	}
}

static void eb_reset_vmas(const struct i915_execbuffer *eb)
{
	eb_release_vmas(eb);
	if (eb->lut_size > 0)
		memset(eb->buckets, 0,
		       sizeof(struct hlist_head) << eb->lut_size);
}

static void eb_destroy(const struct i915_execbuffer *eb)
{
	GEM_BUG_ON(eb->reloc_cache.rq);

	if (eb->lut_size > 0)
		kfree(eb->buckets);
}

static inline u64
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
		  const struct i915_vma *target)
{
	return gen8_canonical_addr((int)reloc->delta + target->node.start);
}

static void reloc_cache_init(struct reloc_cache *cache,
			     struct drm_i915_private *i915)
{
	cache->page = -1;
	cache->vaddr = 0;
	/* Must be a variable in the struct to allow GCC to unroll. */
	cache->gen = INTEL_GEN(i915);
	cache->has_llc = HAS_LLC(i915);
	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
	cache->has_fence = cache->gen < 4;
	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
	cache->node.flags = 0;
	cache->rq = NULL;
	cache->rq_size = 0;
}

static inline void *unmask_page(unsigned long p)
{
	return (void *)(uintptr_t)(p & PAGE_MASK);
}

static inline unsigned int unmask_flags(unsigned long p)
{
	return p & ~PAGE_MASK;
}

#define KMAP 0x4 /* after CLFLUSH_FLAGS */

static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
{
	struct drm_i915_private *i915 =
		container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
	return &i915->ggtt;
}

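/*
 * Close the relocation batch that reloc_gpu() has been filling in: terminate
 * it with MI_BATCH_BUFFER_END, flush the CPU mapping, and submit the request
 * so the GPU performs the queued relocation writes.
 */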
static void reloc_gpu_flush(struct reloc_cache *cache)
{
	GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
	cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;

	__i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
	i915_gem_object_unpin_map(cache->rq->batch->obj);

	intel_gt_chipset_flush(cache->rq->engine->gt);

	i915_request_add(cache->rq);
	cache->rq = NULL;
}

static void reloc_cache_reset(struct reloc_cache *cache)
{
	void *vaddr;

	if (cache->rq)
		reloc_gpu_flush(cache);

	if (!cache->vaddr)
		return;

	vaddr = unmask_page(cache->vaddr);
	if (cache->vaddr & KMAP) {
		if (cache->vaddr & CLFLUSH_AFTER)
			mb();

		kunmap_atomic(vaddr);
		i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
	} else {
		struct i915_ggtt *ggtt = cache_to_ggtt(cache);

		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
		io_mapping_unmap_atomic((void __iomem *)vaddr);

		if (drm_mm_node_allocated(&cache->node)) {
			ggtt->vm.clear_range(&ggtt->vm,
					     cache->node.start,
					     cache->node.size);
			mutex_lock(&ggtt->vm.mutex);
			drm_mm_remove_node(&cache->node);
			mutex_unlock(&ggtt->vm.mutex);
		} else {
			i915_vma_unpin((struct i915_vma *)cache->node.mm);
		}
	}

	cache->vaddr = 0;
	cache->page = -1;
}

static void *reloc_kmap(struct drm_i915_gem_object *obj,
			struct reloc_cache *cache,
			unsigned long page)
{
	void *vaddr;

	if (cache->vaddr) {
		kunmap_atomic(unmask_page(cache->vaddr));
	} else {
		unsigned int flushes;
		int err;

		err = i915_gem_object_prepare_write(obj, &flushes);
		if (err)
			return ERR_PTR(err);

		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);

		cache->vaddr = flushes | KMAP;
		cache->node.mm = (void *)obj;
		if (flushes)
			mb();
	}

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
	cache->page = page;

	return vaddr;
}

static void *reloc_iomap(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 unsigned long page)
{
	struct i915_ggtt *ggtt = cache_to_ggtt(cache);
	unsigned long offset;
	void *vaddr;

	if (cache->vaddr) {
		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
	} else {
		struct i915_vma *vma;
		int err;

		if (i915_gem_object_is_tiled(obj))
			return ERR_PTR(-EINVAL);

		if (use_cpu_reloc(cache, obj))
			return NULL;

		i915_gem_object_lock(obj);
		err = i915_gem_object_set_to_gtt_domain(obj, true);
		i915_gem_object_unlock(obj);
		if (err)
			return ERR_PTR(err);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE |
					       PIN_NONBLOCK /* NOWARN */ |
					       PIN_NOEVICT);
		if (IS_ERR(vma)) {
			memset(&cache->node, 0, sizeof(cache->node));
			mutex_lock(&ggtt->vm.mutex);
			err = drm_mm_insert_node_in_range
				(&ggtt->vm.mm, &cache->node,
				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
				 0, ggtt->mappable_end,
				 DRM_MM_INSERT_LOW);
			mutex_unlock(&ggtt->vm.mutex);
			if (err) /* no inactive aperture space, use cpu reloc */
				return NULL;
		} else {
			cache->node.start = vma->node.start;
			cache->node.mm = (void *)vma;
		}
	}

	offset = cache->node.start;
	if (drm_mm_node_allocated(&cache->node)) {
		ggtt->vm.insert_page(&ggtt->vm,
				     i915_gem_object_get_dma_address(obj, page),
				     offset, I915_CACHE_NONE, 0);
	} else {
		offset += page << PAGE_SHIFT;
	}

	vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
							 offset);
	cache->page = page;
	cache->vaddr = (unsigned long)vaddr;

	return vaddr;
}

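/*
 * Return a CPU-accessible mapping of the page that holds the next relocation,
 * reusing the cached mapping while the page index is unchanged. When
 * use_cpu_reloc() rejects a CPU path, the page is mapped through the GGTT
 * aperture (reloc_iomap); otherwise it is simply kmap'ed (reloc_kmap).
 */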
static void *reloc_vaddr(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 unsigned long page)
{
	void *vaddr;

	if (cache->page == page) {
		vaddr = unmask_page(cache->vaddr);
	} else {
		vaddr = NULL;
		if ((cache->vaddr & KMAP) == 0)
			vaddr = reloc_iomap(obj, cache, page);
		if (!vaddr)
			vaddr = reloc_kmap(obj, cache, page);
	}

	return vaddr;
}

static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
{
	if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
		if (flushes & CLFLUSH_BEFORE) {
			clflushopt(addr);
			mb();
		}

		*addr = value;

		/*
		 * Writes to the same cacheline are serialised by the CPU
		 * (including clflush). On the write path, we only require
		 * that it hits memory in an orderly fashion and place
		 * mb barriers at the start and end of the relocation phase
		 * to ensure ordering of clflush wrt to the system.
		 */
		if (flushes & CLFLUSH_AFTER)
			clflushopt(addr);
	} else
		*addr = value;
}

static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	i915_vma_lock(vma);

	if (obj->cache_dirty & ~obj->cache_coherent)
		i915_gem_clflush_object(obj, 0);
	obj->write_domain = 0;

	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);

	i915_vma_unlock(vma);

	return err;
}

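/*
 * Set up the GPU relocation path: grab a page from the engine's buffer pool,
 * map it so reloc_gpu() can append MI_STORE_DWORD_IMM commands, and build a
 * request that will execute that batch against the target vma. The mapping
 * stays pinned in the reloc_cache until reloc_gpu_flush().
 */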
static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
			     struct i915_vma *vma,
			     unsigned int len)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	struct intel_engine_pool_node *pool;
	struct i915_request *rq;
	struct i915_vma *batch;
	u32 *cmd;
	int err;

	pool = intel_engine_get_pool(eb->engine, PAGE_SIZE);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	cmd = i915_gem_object_pin_map(pool->obj,
				      cache->has_llc ?
				      I915_MAP_FORCE_WB :
				      I915_MAP_FORCE_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto out_pool;
	}

	batch = i915_vma_instance(pool->obj, vma->vm, NULL);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_unmap;
	}

	err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
	if (err)
		goto err_unmap;

	rq = i915_request_create(eb->context);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = intel_engine_pool_mark_active(pool, rq);
	if (err)
		goto err_request;

	err = reloc_move_to_gpu(rq, vma);
	if (err)
		goto err_request;

	err = eb->engine->emit_bb_start(rq,
					batch->node.start, PAGE_SIZE,
					cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
	if (err)
		goto skip_request;

	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto skip_request;

	rq->batch = batch;
	i915_vma_unpin(batch);

	cache->rq = rq;
	cache->rq_cmd = cmd;
	cache->rq_size = 0;

	/* Return with batch mapping (cmd) still pinned */
	goto out_pool;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(batch);
err_unmap:
	i915_gem_object_unpin_map(pool->obj);
out_pool:
	intel_engine_pool_put(pool);
	return err;
}

static u32 *reloc_gpu(struct i915_execbuffer *eb,
		      struct i915_vma *vma,
		      unsigned int len)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	u32 *cmd;

	if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1))
		reloc_gpu_flush(cache);

	if (unlikely(!cache->rq)) {
		int err;

		if (!intel_engine_can_store_dword(eb->engine))
			return ERR_PTR(-ENODEV);

		err = __reloc_gpu_alloc(eb, vma, len);
		if (unlikely(err))
			return ERR_PTR(err);
	}

	cmd = cache->rq_cmd + cache->rq_size;
	cache->rq_size += len;

	return cmd;
}

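/*
 * Patch a single relocation inside @vma. If no CPU mapping is cached and the
 * object is still busy on the GPU (or GPU relocation is forced for
 * debugging), the write is queued as MI_STORE_DWORD_IMM commands through
 * reloc_gpu(); otherwise it is done directly through a CPU mapping, with
 * 64-bit values split into two 32-bit writes.
 */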
static u64
relocate_entry(struct i915_vma *vma,
	       const struct drm_i915_gem_relocation_entry *reloc,
	       struct i915_execbuffer *eb,
	       const struct i915_vma *target)
{
	u64 offset = reloc->offset;
	u64 target_offset = relocation_target(reloc, target);
	bool wide = eb->reloc_cache.use_64bit_reloc;
	void *vaddr;

	if (!eb->reloc_cache.vaddr &&
	    (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
	     !dma_resv_test_signaled_rcu(vma->resv, true))) {
		const unsigned int gen = eb->reloc_cache.gen;
		unsigned int len;
		u32 *batch;
		u64 addr;

		if (wide)
			len = offset & 7 ? 8 : 5;
		else if (gen >= 4)
			len = 4;
		else
			len = 3;

		batch = reloc_gpu(eb, vma, len);
		if (IS_ERR(batch))
			goto repeat;

		addr = gen8_canonical_addr(vma->node.start + offset);
		if (wide) {
			if (offset & 7) {
				*batch++ = MI_STORE_DWORD_IMM_GEN4;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = lower_32_bits(target_offset);

				addr = gen8_canonical_addr(addr + 4);

				*batch++ = MI_STORE_DWORD_IMM_GEN4;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = upper_32_bits(target_offset);
			} else {
				*batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = lower_32_bits(target_offset);
				*batch++ = upper_32_bits(target_offset);
			}
		} else if (gen >= 6) {
			*batch++ = MI_STORE_DWORD_IMM_GEN4;
			*batch++ = 0;
			*batch++ = addr;
			*batch++ = target_offset;
		} else if (gen >= 4) {
			*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
			*batch++ = 0;
			*batch++ = addr;
			*batch++ = target_offset;
		} else {
			*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*batch++ = addr;
			*batch++ = target_offset;
		}

		goto out;
	}

repeat:
	vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	clflush_write32(vaddr + offset_in_page(offset),
			lower_32_bits(target_offset),
			eb->reloc_cache.vaddr);

	if (wide) {
		offset += sizeof(u32);
		target_offset >>= 32;
		wide = false;
		goto repeat;
	}

out:
	return target->node.start | UPDATE;
}

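/*
 * Process one relocation entry: look up the target vma, validate the
 * read/write domains and the relocation offset, and skip the write entirely
 * when the presumed_offset supplied by userspace already matches the
 * target's address. Returns the new presumed offset (tagged with UPDATE),
 * zero if nothing needed to be written, or a negative error code.
 */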
static u64
eb_relocate_entry(struct i915_execbuffer *eb,
		  struct i915_vma *vma,
		  const struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_i915_private *i915 = eb->i915;
	struct i915_vma *target;
	int err;

	/* we already hold a reference to all valid objects */
	target = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(!target))
		return -ENOENT;

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		drm_dbg(&i915->drm, "reloc with multiple write domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		drm_dbg(&i915->drm, "reloc with read/write non-GPU domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	if (reloc->write_domain) {
		*target->exec_flags |= EXEC_OBJECT_WRITE;

		/*
		 * Sandybridge PPGTT errata: We need a global gtt mapping
		 * for MI and pipe_control writes because the gpu doesn't
		 * properly redirect them through the ppgtt for non_secure
		 * batchbuffers.
		 */
		if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
		    IS_GEN(eb->i915, 6)) {
			err = i915_vma_bind(target, target->obj->cache_level,
					    PIN_GLOBAL, NULL);
			if (WARN_ONCE(err,
				      "Unexpected failure to bind target VMA!"))
				return err;
		}
	}

	/*
	 * If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (!DBG_FORCE_RELOC &&
	    gen8_canonical_addr(target->node.start) == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		     vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
		drm_dbg(&i915->drm, "Relocation beyond object bounds: "
			  "target %d offset %d size %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset,
			  (int)vma->size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		drm_dbg(&i915->drm, "Relocation not 4-byte aligned: "
			  "target %d offset %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset);
		return -EINVAL;
	}

	/*
	 * If we write into the object, we need to force the synchronisation
	 * barrier, either with an asynchronous clflush or if we executed the
	 * patching using the GPU (though that should be serialised by the
	 * timeline). To be completely sure, and since we are required to
	 * do relocations we are already stalling, disable the user's opt
	 * out of our synchronisation.
	 */
	*vma->exec_flags &= ~EXEC_OBJECT_ASYNC;

	/* and update the user's relocation entry */
	return relocate_entry(vma, reloc, eb, target);
}

1426
{
1427
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
1428 1429
	struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *urelocs;
1430
	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
1431
	unsigned int remain;
1432

1433
	urelocs = u64_to_user_ptr(entry->relocs_ptr);
1434
	remain = entry->relocation_count;
1435 1436
	if (unlikely(remain > N_RELOC(ULONG_MAX)))
		return -EINVAL;
1437

1438 1439 1440 1441 1442
	/*
	 * We must check that the entire relocation array is safe
	 * to read. However, if the array is not writable the user loses
	 * the updated relocation values.
	 */
1443
	if (unlikely(!access_ok(urelocs, remain*sizeof(*urelocs))))
1444 1445 1446 1447 1448 1449 1450
		return -EFAULT;

	do {
		struct drm_i915_gem_relocation_entry *r = stack;
		unsigned int count =
			min_t(unsigned int, remain, ARRAY_SIZE(stack));
		unsigned int copied;
1451

1452 1453
		/*
		 * This is the fast path and we cannot handle a pagefault
1454 1455 1456 1457 1458 1459 1460
		 * whilst holding the struct mutex lest the user pass in the
		 * relocations contained within a mmaped bo. For in such a case
		 * we, the page fault handler would call i915_gem_fault() and
		 * we would try to acquire the struct mutex again. Obviously
		 * this is bad and so lockdep complains vehemently.
		 */
		pagefault_disable();
1461
		copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
1462
		pagefault_enable();
1463 1464
		if (unlikely(copied)) {
			remain = -EFAULT;
1465 1466
			goto out;
		}
1467

1468
		remain -= count;
1469
		do {
1470
			u64 offset = eb_relocate_entry(eb, vma, r);
1471

1472 1473 1474
			if (likely(offset == 0)) {
			} else if ((s64)offset < 0) {
				remain = (int)offset;
1475
				goto out;
1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498
			} else {
				/*
				 * Note that reporting an error now
				 * leaves everything in an inconsistent
				 * state as we have *already* changed
				 * the relocation value inside the
				 * object. As we have not changed the
				 * reloc.presumed_offset or will not
				 * change the execobject.offset, on the
				 * call we may not rewrite the value
				 * inside the object, leaving it
				 * dangling and causing a GPU hang. Unless
				 * userspace dynamically rebuilds the
				 * relocations on each execbuf rather than
				 * presume a static tree.
				 *
				 * We did previously check if the relocations
				 * were writable (access_ok), an error now
				 * would be a strange race with mprotect,
				 * having already demonstrated that we
				 * can read from this userspace address.
				 */
				offset = gen8_canonical_addr(offset & ~UPDATE);
1499 1500 1501 1502
				if (unlikely(__put_user(offset, &urelocs[r-stack].presumed_offset))) {
					remain = -EFAULT;
					goto out;
				}
1503
			}
1504 1505 1506
		} while (r++, --count);
		urelocs += ARRAY_SIZE(stack);
	} while (remain);
1507
out:
1508
	reloc_cache_reset(&eb->reloc_cache);
1509
	return remain;
1510 1511 1512
}

static int
1513
eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma)
1514
{
1515
	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
1516 1517 1518 1519
	struct drm_i915_gem_relocation_entry *relocs =
		u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
	unsigned int i;
	int err;
1520 1521

	for (i = 0; i < entry->relocation_count; i++) {
1522
		u64 offset = eb_relocate_entry(eb, vma, &relocs[i]);
1523

1524 1525 1526 1527
		if ((s64)offset < 0) {
			err = (int)offset;
			goto err;
		}
1528
	}
1529 1530 1531 1532
	err = 0;
err:
	reloc_cache_reset(&eb->reloc_cache);
	return err;
1533 1534
}

1535
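/*
 * Verify that a user relocation array is readable: bounds-check the entry
 * count, access_ok() the range, and touch one byte per page so the array is
 * faulted in before the pagefault-disabled fast path retries the copy.
 */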
static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
{
	const char __user *addr, *end;
	unsigned long size;
	char __maybe_unused c;

	size = entry->relocation_count;
	if (size == 0)
		return 0;

	if (size > N_RELOC(ULONG_MAX))
		return -EINVAL;

	addr = u64_to_user_ptr(entry->relocs_ptr);
	size *= sizeof(struct drm_i915_gem_relocation_entry);
	if (!access_ok(addr, size))
		return -EFAULT;

	end = addr + size;
	for (; addr < end; addr += PAGE_SIZE) {
		int err = __get_user(c, addr);
		if (err)
			return err;
	}
	return __get_user(c, end - 1);
}

static int eb_copy_relocations(const struct i915_execbuffer *eb)
{
	struct drm_i915_gem_relocation_entry *relocs;
	const unsigned int count = eb->buffer_count;
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		const unsigned int nreloc = eb->exec[i].relocation_count;
		struct drm_i915_gem_relocation_entry __user *urelocs;
		unsigned long size;
		unsigned long copied;

		if (nreloc == 0)
			continue;

		err = check_relocations(&eb->exec[i]);
		if (err)
			goto err;

		urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
		size = nreloc * sizeof(*relocs);

		relocs = kvmalloc_array(size, 1, GFP_KERNEL);
		if (!relocs) {
			err = -ENOMEM;
			goto err;
		}

		/* copy_from_user is limited to < 4GiB */
		copied = 0;
		do {
			unsigned int len =
				min_t(u64, BIT_ULL(31), size - copied);

			if (__copy_from_user((char *)relocs + copied,
					     (char __user *)urelocs + copied,
					     len))
				goto end;

			copied += len;
		} while (copied < size);

		/*
		 * As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		if (!user_access_begin(urelocs, size))
			goto end;

		for (copied = 0; copied < nreloc; copied++)
			unsafe_put_user(-1,
					&urelocs[copied].presumed_offset,
					end_user);
		user_access_end();

		eb->exec[i].relocs_ptr = (uintptr_t)relocs;
	}

	return 0;

end_user:
	user_access_end();
end:
	kvfree(relocs);
	err = -EFAULT;
err:
	while (i--) {
		relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
		if (eb->exec[i].relocation_count)
			kvfree(relocs);
	}
	return err;
}

static int eb_prefault_relocations(const struct i915_execbuffer *eb)
1644
{
1645 1646
	const unsigned int count = eb->buffer_count;
	unsigned int i;
1647

1648 1649
	for (i = 0; i < count; i++) {
		int err;
1650

1651 1652 1653 1654
		err = check_relocations(&eb->exec[i]);
		if (err)
			return err;
	}
1655

1656
	return 0;
1657 1658
}

1659
static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
1660
{
1661
	struct drm_device *dev = &eb->i915->drm;
1662
	bool have_copy = false;
1663
	struct i915_vma *vma;
1664 1665 1666 1667 1668 1669 1670
	int err = 0;

repeat:
	if (signal_pending(current)) {
		err = -ERESTARTSYS;
		goto out;
	}
1671

1672
	/* We may process another execbuffer during the unlock... */
1673
	eb_reset_vmas(eb);
1674 1675
	mutex_unlock(&dev->struct_mutex);

1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696
	/*
	 * We take 3 passes through the slowpatch.
	 *
	 * 1 - we try to just prefault all the user relocation entries and
	 * then attempt to reuse the atomic pagefault disabled fast path again.
	 *
	 * 2 - we copy the user entries to a local buffer here outside of the
	 * local and allow ourselves to wait upon any rendering before
	 * relocations
	 *
	 * 3 - we already have a local copy of the relocation entries, but
	 * were interrupted (EAGAIN) whilst waiting for the objects, try again.
	 */
	if (!err) {
		err = eb_prefault_relocations(eb);
	} else if (!have_copy) {
		err = eb_copy_relocations(eb);
		have_copy = err == 0;
	} else {
		cond_resched();
		err = 0;
1697
	}
1698 1699 1700
	if (err) {
		mutex_lock(&dev->struct_mutex);
		goto out;
1701 1702
	}

1703 1704 1705
	/* A frequent cause for EAGAIN are currently unavailable client pages */
	flush_workqueue(eb->i915->mm.userptr_wq);

1706 1707
	err = i915_mutex_lock_interruptible(dev);
	if (err) {
1708
		mutex_lock(&dev->struct_mutex);
1709
		goto out;
1710 1711
	}

1712
	/* reacquire the objects */
1713 1714
	err = eb_lookup_vmas(eb);
	if (err)
1715
		goto err;
1716

1717 1718
	GEM_BUG_ON(!eb->batch);

1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730
	list_for_each_entry(vma, &eb->relocs, reloc_link) {
		if (!have_copy) {
			pagefault_disable();
			err = eb_relocate_vma(eb, vma);
			pagefault_enable();
			if (err)
				goto repeat;
		} else {
			err = eb_relocate_vma_slow(eb, vma);
			if (err)
				goto err;
		}
1731 1732
	}

1733 1734
	/*
	 * Leave the user relocations as are, this is the painfully slow path,
1735 1736 1737 1738 1739 1740
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761
	if (err == -EAGAIN)
		goto repeat;

out:
	if (have_copy) {
		const unsigned int count = eb->buffer_count;
		unsigned int i;

		for (i = 0; i < count; i++) {
			const struct drm_i915_gem_exec_object2 *entry =
				&eb->exec[i];
			struct drm_i915_gem_relocation_entry *relocs;

			if (!entry->relocation_count)
				continue;

			relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
			kvfree(relocs);
		}
	}

	return err;
}

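/*
 * Look up all objects for this execbuf and apply the relocations inline;
 * fall back to eb_relocate_slow() if anything needs to sleep or fault.
 */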
static int eb_relocate(struct i915_execbuffer *eb)
{
	if (eb_lookup_vmas(eb))
		goto slow;

	/* The objects are in their final locations, apply the relocations. */
	if (eb->args->flags & __EXEC_HAS_RELOC) {
		struct i915_vma *vma;

		list_for_each_entry(vma, &eb->relocs, reloc_link) {
			if (eb_relocate_vma(eb, vma))
				goto slow;
		}
	}

	return 0;

slow:
	return eb_relocate_slow(eb);
}

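/*
 * Take the reservation locks for all objects (backing off and retrying on
 * contention), flush CPU caches where needed, add the request to each
 * object, then drop the per-object locks and pin references.
 */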
static int eb_move_to_gpu(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	struct ww_acquire_ctx acquire;
	unsigned int i;
	int err = 0;

	ww_acquire_init(&acquire, &reservation_ww_class);

	for (i = 0; i < count; i++) {
		struct i915_vma *vma = eb->vma[i];

		err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
		if (!err)
			continue;

		GEM_BUG_ON(err == -EALREADY); /* No duplicate vma */

		if (err == -EDEADLK) {
			GEM_BUG_ON(i == 0);
			do {
				int j = i - 1;

				ww_mutex_unlock(&eb->vma[j]->resv->lock);

				swap(eb->flags[i], eb->flags[j]);
				swap(eb->vma[i],  eb->vma[j]);
				eb->vma[i]->exec_flags = &eb->flags[i];
			} while (--i);
			GEM_BUG_ON(vma != eb->vma[0]);
			vma->exec_flags = &eb->flags[0];

			err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
							       &acquire);
		}
		if (err)
			break;
	}
	ww_acquire_done(&acquire);

	while (i--) {
		unsigned int flags = eb->flags[i];
		struct i915_vma *vma = eb->vma[i];
		struct drm_i915_gem_object *obj = vma->obj;

		assert_vma_held(vma);

		if (flags & EXEC_OBJECT_CAPTURE) {
			struct i915_capture_list *capture;

			capture = kmalloc(sizeof(*capture), GFP_KERNEL);
			if (capture) {
				capture->next = eb->request->capture_list;
				capture->vma = vma;
				eb->request->capture_list = capture;
			}
		}

		/*
		 * If the GPU is not _reading_ through the CPU cache, we need
		 * to make sure that any writes (both previous GPU writes from
		 * before a change in snooping levels and normal CPU writes)
		 * caught in that cache are flushed to main memory.
		 *
		 * We want to say
		 *   obj->cache_dirty &&
		 *   !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		 * but gcc's optimiser doesn't handle that as well and emits
		 * two jumps instead of one. Maybe one day...
		 */
		if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) {
			if (i915_gem_clflush_object(obj, 0))
				flags &= ~EXEC_OBJECT_ASYNC;
		}

		if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) {
			err = i915_request_await_object
				(eb->request, obj, flags & EXEC_OBJECT_WRITE);
		}

		if (err == 0)
			err = i915_vma_move_to_active(vma, eb->request, flags);

		i915_vma_unlock(vma);

		__eb_unreserve_vma(vma, flags);
		vma->exec_flags = NULL;

		if (unlikely(flags & __EXEC_OBJECT_HAS_REF))
			i915_vma_put(vma);
	}
	ww_acquire_fini(&acquire);

	if (unlikely(err))
		goto err_skip;

	eb->exec = NULL;

	/* Unconditionally flush any chipset caches (for streaming writes). */
	intel_gt_chipset_flush(eb->engine->gt);
	return 0;

err_skip:
	i915_request_skip(eb->request, err);
	return err;
}

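/* Validate the user supplied execbuffer2 arguments before doing any work. */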
static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
		return -EINVAL;

	/* Kernel clipping was a DRI1 misfeature */
	if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) {
		if (exec->num_cliprects || exec->cliprects_ptr)
			return -EINVAL;
	}

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return -EINVAL;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return -EINVAL;

	return 0;
}

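/*
 * Emit MI_LOAD_REGISTER_IMM commands to zero the four GEN7_SO_WRITE_OFFSET
 * registers, as requested by I915_EXEC_GEN7_SOL_RESET.
 */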
static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
{
	u32 *cs;
	int i;

	if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
		drm_dbg(&rq->i915->drm, "sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	cs = intel_ring_begin(rq, 4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(4);
	for (i = 0; i < 4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
		*cs++ = 0;
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}

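/* Create and pin a vma for the shadow batch in the given address space. */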
static struct i915_vma *
shadow_batch_pin(struct drm_i915_gem_object *obj,
		 struct i915_address_space *vm,
		 unsigned int flags)
{
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		return vma;

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		return ERR_PTR(err);

	return vma;
}

struct eb_parse_work {
	struct dma_fence_work base;
	struct intel_engine_cs *engine;
	struct i915_vma *batch;
	struct i915_vma *shadow;
	struct i915_vma *trampoline;
	unsigned int batch_offset;
	unsigned int batch_length;
};

static int __eb_parse(struct dma_fence_work *work)
{
	struct eb_parse_work *pw = container_of(work, typeof(*pw), base);

	return intel_engine_cmd_parser(pw->engine,
				       pw->batch,
				       pw->batch_offset,
				       pw->batch_length,
				       pw->shadow,
				       pw->trampoline);
}

static void __eb_parse_release(struct dma_fence_work *work)
{
	struct eb_parse_work *pw = container_of(work, typeof(*pw), base);

	if (pw->trampoline)
		i915_active_release(&pw->trampoline->active);
	i915_active_release(&pw->shadow->active);
	i915_active_release(&pw->batch->active);
}

static const struct dma_fence_work_ops eb_parse_ops = {
	.name = "eb_parse",
	.work = __eb_parse,
	.release = __eb_parse_release,
};

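/*
 * Queue the command parser as asynchronous work: the parse runs once all
 * writes into the user batch have completed, and the shadow batch gains an
 * exclusive fence so execution waits for the parser to finish.
 */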
static int eb_parse_pipeline(struct i915_execbuffer *eb,
			     struct i915_vma *shadow,
			     struct i915_vma *trampoline)
{
	struct eb_parse_work *pw;
	int err;

	pw = kzalloc(sizeof(*pw), GFP_KERNEL);
	if (!pw)
		return -ENOMEM;

	err = i915_active_acquire(&eb->batch->active);
	if (err)
		goto err_free;

	err = i915_active_acquire(&shadow->active);
	if (err)
		goto err_batch;

	if (trampoline) {
		err = i915_active_acquire(&trampoline->active);
		if (err)
			goto err_shadow;
	}

	dma_fence_work_init(&pw->base, &eb_parse_ops);

	pw->engine = eb->engine;
	pw->batch = eb->batch;
	pw->batch_offset = eb->batch_start_offset;
	pw->batch_length = eb->batch_len;
	pw->shadow = shadow;
	pw->trampoline = trampoline;

	err = dma_resv_lock_interruptible(pw->batch->resv, NULL);
	if (err)
		goto err_trampoline;

	err = dma_resv_reserve_shared(pw->batch->resv, 1);
	if (err)
		goto err_batch_unlock;

	/* Wait for all writes (and relocs) into the batch to complete */
	err = i915_sw_fence_await_reservation(&pw->base.chain,
					      pw->batch->resv, NULL, false,
					      0, I915_FENCE_GFP);
	if (err < 0)
		goto err_batch_unlock;

	/* Keep the batch alive and unwritten as we parse */
	dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);

	dma_resv_unlock(pw->batch->resv);

	/* Force execution to wait for completion of the parser */
	dma_resv_lock(shadow->resv, NULL);
	dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
	dma_resv_unlock(shadow->resv);

	dma_fence_work_commit(&pw->base);
	return 0;

err_batch_unlock:
	dma_resv_unlock(pw->batch->resv);
err_trampoline:
	if (trampoline)
		i915_active_release(&trampoline->active);
err_shadow:
	i915_active_release(&shadow->active);
err_batch:
	i915_active_release(&eb->batch->active);
err_free:
	kfree(pw);
	return err;
}

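/*
 * Set up the command parser for this batch: allocate a shadow (and, for a
 * GGTT-based parser, a trampoline) buffer from the engine pool and make the
 * shadow the batch that is actually executed.
 */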
static int eb_parse(struct i915_execbuffer *eb)
{
	struct drm_i915_private *i915 = eb->i915;
	struct intel_engine_pool_node *pool;
	struct i915_vma *shadow, *trampoline;
	unsigned int len;
	int err;

	if (!eb_use_cmdparser(eb))
		return 0;

	len = eb->batch_len;
	if (!CMDPARSER_USES_GGTT(eb->i915)) {
		/*
		 * ppGTT backed shadow buffers must be mapped RO, to prevent
		 * post-scan tampering
		 */
		if (!eb->context->vm->has_read_only) {
			drm_dbg(&i915->drm,
				"Cannot prevent post-scan tampering without RO capable vm\n");
			return -EINVAL;
		}
	} else {
		len += I915_CMD_PARSER_TRAMPOLINE_SIZE;
	}

	pool = intel_engine_get_pool(eb->engine, len);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	shadow = shadow_batch_pin(pool->obj, eb->context->vm, PIN_USER);
	if (IS_ERR(shadow)) {
		err = PTR_ERR(shadow);
		goto err;
	}
	i915_gem_object_set_readonly(shadow->obj);

	trampoline = NULL;
	if (CMDPARSER_USES_GGTT(eb->i915)) {
		trampoline = shadow;

		shadow = shadow_batch_pin(pool->obj,
					  &eb->engine->gt->ggtt->vm,
					  PIN_GLOBAL);
		if (IS_ERR(shadow)) {
			err = PTR_ERR(shadow);
			shadow = trampoline;
			goto err_shadow;
		}

		eb->batch_flags |= I915_DISPATCH_SECURE;
	}

	err = eb_parse_pipeline(eb, shadow, trampoline);
	if (err)
		goto err_trampoline;

	eb->vma[eb->buffer_count] = i915_vma_get(shadow);
	eb->flags[eb->buffer_count] =
		__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
	shadow->exec_flags = &eb->flags[eb->buffer_count];
	eb->buffer_count++;

	eb->trampoline = trampoline;
	eb->batch_start_offset = 0;
	eb->batch = shadow;

	shadow->private = pool;
	return 0;

err_trampoline:
	if (trampoline)
		i915_vma_unpin(trampoline);
err_shadow:
	i915_vma_unpin(shadow);
err:
	intel_engine_pool_put(pool);
	return err;
}

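/* Track this request on the client's list of outstanding requests. */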
static void
add_to_client(struct i915_request *rq, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	rq->file_priv = file_priv;

	spin_lock(&file_priv->mm.lock);
	list_add_tail(&rq->client_link, &file_priv->mm.request_list);
	spin_unlock(&file_priv->mm.lock);
}

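/*
 * Emit the commands for this execbuf into the ring: serialise against the
 * objects in use, apply any fixups (SOL reset), then emit the batchbuffer
 * start (and trampoline, if any).
 */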
static int eb_submit(struct i915_execbuffer *eb)
{
	int err;

	err = eb_move_to_gpu(eb);
	if (err)
		return err;

	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
		err = i915_reset_gen7_sol_offsets(eb->request);
		if (err)
			return err;
	}

	/*
	 * After we completed waiting for other engines (using HW semaphores)
	 * then we can signal that this request/batch is ready to run. This
	 * allows us to determine if the batch is still waiting on the GPU
	 * or actually running by checking the breadcrumb.
	 */
	if (eb->engine->emit_init_breadcrumb) {
		err = eb->engine->emit_init_breadcrumb(eb->request);
		if (err)
			return err;
	}

	err = eb->engine->emit_bb_start(eb->request,
					eb->batch->node.start +
					eb->batch_start_offset,
					eb->batch_len,
					eb->batch_flags);
	if (err)
		return err;

	if (eb->trampoline) {
		GEM_BUG_ON(eb->batch_start_offset);
		err = eb->engine->emit_bb_start(eb->request,
						eb->trampoline->node.start +
						eb->batch_len,
						0, 0);
		if (err)
			return err;
	}

	if (intel_context_nopreempt(eb->context))
		__set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);

	return 0;
}

static int num_vcs_engines(const struct drm_i915_private *i915)
{
	return hweight64(INTEL_INFO(i915)->engine_mask &
			 GENMASK_ULL(VCS0 + I915_MAX_VCS - 1, VCS0));
}

/*
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The engine index is returned.
 */
static unsigned int
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
			 struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_engine < 0)
		file_priv->bsd_engine =
			get_random_int() % num_vcs_engines(dev_priv);

	return file_priv->bsd_engine;
}

static const enum intel_engine_id user_ring_map[] = {
	[I915_EXEC_DEFAULT]	= RCS0,
	[I915_EXEC_RENDER]	= RCS0,
	[I915_EXEC_BLT]		= BCS0,
	[I915_EXEC_BSD]		= VCS0,
	[I915_EXEC_VEBOX]	= VECS0
};

static struct i915_request *eb_throttle(struct intel_context *ce)
{
	struct intel_ring *ring = ce->ring;
	struct intel_timeline *tl = ce->timeline;
	struct i915_request *rq;

	/*
	 * Completely unscientific finger-in-the-air estimates for suitable
	 * maximum user request size (to avoid blocking) and then backoff.
	 */
	if (intel_ring_update_space(ring) >= PAGE_SIZE)
		return NULL;

	/*
	 * Find a request that after waiting upon, there will be at least half
	 * the ring available. The hysteresis allows us to compete for the
	 * shared ring and should mean that we sleep less often prior to
	 * claiming our resources, but not so long that the ring completely
	 * drains before we can submit our next request.
	 */
	list_for_each_entry(rq, &tl->requests, link) {
		if (rq->ring != ring)
			continue;

		if (__intel_ring_space(rq->postfix,
				       ring->emit, ring->size) > ring->size / 2)
			break;
	}
	if (&rq->link == &tl->requests)
		return NULL; /* weird, we will check again later for real */

	return i915_request_get(rq);
}

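/*
 * Pin the context for execution: check the GT is usable, pin the context,
 * enter its timeline and throttle against earlier requests if the ring is
 * nearly full.
 */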
static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
{
	struct intel_timeline *tl;
	struct i915_request *rq;
	int err;

	/*
	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged.
	 */
	err = intel_gt_terminally_wedged(ce->engine->gt);
	if (err)
		return err;

	if (unlikely(intel_context_is_banned(ce)))
		return -EIO;

	/*
	 * Pinning the contexts may generate requests in order to acquire
	 * GGTT space, so do this first before we reserve a seqno for
	 * ourselves.
	 */
	err = intel_context_pin(ce);
	if (err)
		return err;

	/*
	 * Take a local wakeref for preparing to dispatch the execbuf as
	 * we expect to access the hardware fairly frequently in the
	 * process, and require the engine to be kept awake between accesses.
	 * Upon dispatch, we acquire another prolonged wakeref that we hold
	 * until the timeline is idle, which in turn releases the wakeref
	 * taken on the engine, and the parent device.
	 */
	tl = intel_context_timeline_lock(ce);
	if (IS_ERR(tl)) {
		err = PTR_ERR(tl);
		goto err_unpin;
	}

	intel_context_enter(ce);
	rq = eb_throttle(ce);

	intel_context_timeline_unlock(tl);

	if (rq) {
		bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
		long timeout;

		timeout = MAX_SCHEDULE_TIMEOUT;
		if (nonblock)
			timeout = 0;

		timeout = i915_request_wait(rq,
					    I915_WAIT_INTERRUPTIBLE,
					    timeout);
		i915_request_put(rq);

		if (timeout < 0) {
			err = nonblock ? -EWOULDBLOCK : timeout;
			goto err_exit;
		}
	}

	eb->engine = ce->engine;
	eb->context = ce;
	return 0;

err_exit:
	mutex_lock(&tl->mutex);
	intel_context_exit(ce);
	intel_context_timeline_unlock(tl);
err_unpin:
	intel_context_unpin(ce);
	return err;
}

static void eb_unpin_engine(struct i915_execbuffer *eb)
{
	struct intel_context *ce = eb->context;
	struct intel_timeline *tl = ce->timeline;

	mutex_lock(&tl->mutex);
	intel_context_exit(ce);
	mutex_unlock(&tl->mutex);

	intel_context_unpin(ce);
}

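/*
 * Map the legacy execbuffer ring selection flags onto an engine index,
 * including round-robin or explicit selection of a BSD (video) engine.
 */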
static unsigned int
eb_select_legacy_ring(struct i915_execbuffer *eb,
		      struct drm_file *file,
		      struct drm_i915_gem_execbuffer2 *args)
{
	struct drm_i915_private *i915 = eb->i915;
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;

	if (user_ring_id != I915_EXEC_BSD &&
	    (args->flags & I915_EXEC_BSD_MASK)) {
		drm_dbg(&i915->drm,
			"execbuf with non bsd ring but with invalid "
			"bsd dispatch flags: %d\n", (int)(args->flags));
		return -1;
	}

	if (user_ring_id == I915_EXEC_BSD && num_vcs_engines(i915) > 1) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
			bsd_idx = gen8_dispatch_bsd_engine(i915, file);
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
			bsd_idx--;
		} else {
			drm_dbg(&i915->drm,
				"execbuf with unknown bsd ring: %u\n",
				bsd_idx);
			return -1;
		}

		return _VCS(bsd_idx);
	}

	if (user_ring_id >= ARRAY_SIZE(user_ring_map)) {
		drm_dbg(&i915->drm, "execbuf with unknown ring: %u\n",
			user_ring_id);
		return -1;
	}

	return user_ring_map[user_ring_id];
}

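/* Resolve the user's engine selection to an intel_context and pin it. */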
static int
eb_pin_engine(struct i915_execbuffer *eb,
	      struct drm_file *file,
	      struct drm_i915_gem_execbuffer2 *args)
{
	struct intel_context *ce;
	unsigned int idx;
	int err;

	if (i915_gem_context_user_engines(eb->gem_context))
		idx = args->flags & I915_EXEC_RING_MASK;
	else
		idx = eb_select_legacy_ring(eb, file, args);

	ce = i915_gem_context_get_engine(eb->gem_context, idx);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = __eb_pin_engine(eb, ce);
	intel_context_put(ce);

	return err;
}

static void
__free_fence_array(struct drm_syncobj **fences, unsigned int n)
{
	while (n--)
		drm_syncobj_put(ptr_mask_bits(fences[n], 2));
	kvfree(fences);
}

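/*
 * Copy the user's drm_i915_gem_exec_fence array and resolve each handle to
 * a drm_syncobj, packing the per-fence flags into the low pointer bits.
 */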
static struct drm_syncobj **
get_fence_array(struct drm_i915_gem_execbuffer2 *args,
		struct drm_file *file)
{
	const unsigned long nfences = args->num_cliprects;
	struct drm_i915_gem_exec_fence __user *user;
	struct drm_syncobj **fences;
	unsigned long n;
	int err;

	if (!(args->flags & I915_EXEC_FENCE_ARRAY))
		return NULL;

	/* Check multiplication overflow for access_ok() and kvmalloc_array() */
	BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
	if (nfences > min_t(unsigned long,
			    ULONG_MAX / sizeof(*user),
			    SIZE_MAX / sizeof(*fences)))
		return ERR_PTR(-EINVAL);

	user = u64_to_user_ptr(args->cliprects_ptr);
	if (!access_ok(user, nfences * sizeof(*user)))
		return ERR_PTR(-EFAULT);

	fences = kvmalloc_array(nfences, sizeof(*fences),
				__GFP_NOWARN | GFP_KERNEL);
	if (!fences)
		return ERR_PTR(-ENOMEM);

	for (n = 0; n < nfences; n++) {
		struct drm_i915_gem_exec_fence fence;
		struct drm_syncobj *syncobj;

		if (__copy_from_user(&fence, user++, sizeof(fence))) {
			err = -EFAULT;
			goto err;
		}

		if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
			err = -EINVAL;
			goto err;
		}

		syncobj = drm_syncobj_find(file, fence.handle);
		if (!syncobj) {
			DRM_DEBUG("Invalid syncobj handle provided\n");
			err = -ENOENT;
			goto err;
		}

		BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
			     ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);

		fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
	}

	return fences;

err:
	__free_fence_array(fences, n);
	return ERR_PTR(err);
}

static void
put_fence_array(struct drm_i915_gem_execbuffer2 *args,
		struct drm_syncobj **fences)
{
	if (fences)
		__free_fence_array(fences, args->num_cliprects);
}

static int
await_fence_array(struct i915_execbuffer *eb,
		  struct drm_syncobj **fences)
{
	const unsigned int nfences = eb->args->num_cliprects;
	unsigned int n;
	int err;

	for (n = 0; n < nfences; n++) {
		struct drm_syncobj *syncobj;
		struct dma_fence *fence;
		unsigned int flags;

		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
		if (!(flags & I915_EXEC_FENCE_WAIT))
			continue;

		fence = drm_syncobj_fence_get(syncobj);
		if (!fence)
			return -EINVAL;

		err = i915_request_await_dma_fence(eb->request, fence);
		dma_fence_put(fence);
		if (err < 0)
			return err;
	}

	return 0;
}

static void
signal_fence_array(struct i915_execbuffer *eb,
		   struct drm_syncobj **fences)
{
	const unsigned int nfences = eb->args->num_cliprects;
	struct dma_fence * const fence = &eb->request->fence;
	unsigned int n;

	for (n = 0; n < nfences; n++) {
		struct drm_syncobj *syncobj;
		unsigned int flags;

		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
		if (!(flags & I915_EXEC_FENCE_SIGNAL))
			continue;

		drm_syncobj_replace_fence(syncobj, fence);
	}
}

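/* Retire completed requests on the timeline, up to and including @end. */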
static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
{
	struct i915_request *rq, *rn;

	list_for_each_entry_safe(rq, rn, &tl->requests, link)
		if (rq == end || !i915_request_retire(rq))
			break;
}

static void eb_request_add(struct i915_execbuffer *eb)
{
	struct i915_request *rq = eb->request;
	struct intel_timeline * const tl = i915_request_timeline(rq);
	struct i915_sched_attr attr = {};
	struct i915_request *prev;

	lockdep_assert_held(&tl->mutex);
	lockdep_unpin_lock(&tl->mutex, rq->cookie);

	trace_i915_request_add(rq);

	prev = __i915_request_commit(rq);

	/* Check that the context wasn't destroyed before submission */
	if (likely(rcu_access_pointer(eb->context->gem_context))) {
		attr = eb->gem_context->sched;

		/*
		 * Boost actual workloads past semaphores!
		 *
		 * With semaphores we spin on one engine waiting for another,
		 * simply to reduce the latency of starting our work when
		 * the signaler completes. However, if there is any other
		 * work that we could be doing on this engine instead, that
		 * is better utilisation and will reduce the overall duration
		 * of the current work. To avoid PI boosting a semaphore
		 * far in the distance past over useful work, we keep a history
		 * of any semaphore use along our dependency chain.
		 */
		if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
			attr.priority |= I915_PRIORITY_NOSEMAPHORE;

		/*
		 * Boost priorities to new clients (new request flows).
		 *
		 * Allow interactive/synchronous clients to jump ahead of
		 * the bulk clients. (FQ_CODEL)
		 */
		if (list_empty(&rq->sched.signalers_list))
			attr.priority |= I915_PRIORITY_WAIT;
	} else {
		/* Serialise with context_close via the add_to_timeline */
		i915_request_skip(rq, -ENOENT);
	}

	local_bh_disable();
	__i915_request_queue(rq, &attr);
	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */

	/* Try to clean up the client's timeline after submitting the request */
	if (prev)
		retire_requests(tl, prev);

	mutex_unlock(&tl->mutex);
}

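/*
 * Common implementation for both execbuffer ioctls: validate arguments,
 * pin the engine, reserve and relocate all objects, optionally run the
 * command parser, then build and submit the request with its fences.
 */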
static int
i915_gem_do_execbuffer(struct drm_device *dev,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec,
		       struct drm_syncobj **fences)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct i915_execbuffer eb;
	struct dma_fence *in_fence = NULL;
	struct dma_fence *exec_fence = NULL;
	struct sync_file *out_fence = NULL;
	int out_fence_fd = -1;
	int err;

	BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS);
	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);

	eb.i915 = i915;
	eb.file = file;
	eb.args = args;
	if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
		args->flags |= __EXEC_HAS_RELOC;

	eb.exec = exec;
	eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1);
	eb.vma[0] = NULL;
	eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1);

	eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	reloc_cache_init(&eb.reloc_cache, eb.i915);

	eb.buffer_count = args->buffer_count;
	eb.batch_start_offset = args->batch_start_offset;
	eb.batch_len = args->batch_len;
	eb.trampoline = NULL;

	eb.batch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (INTEL_GEN(i915) >= 11)
			return -ENODEV;

		/* Return -EPERM to trigger fallback code on old binaries. */
		if (!HAS_SECURE_BATCHES(i915))
			return -EPERM;

		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		eb.batch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		eb.batch_flags |= I915_DISPATCH_PINNED;

	if (args->flags & I915_EXEC_FENCE_IN) {
		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!in_fence)
			return -EINVAL;
	}

	if (args->flags & I915_EXEC_FENCE_SUBMIT) {
		if (in_fence) {
			err = -EINVAL;
			goto err_in_fence;
		}

		exec_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!exec_fence) {
			err = -EINVAL;
			goto err_in_fence;
		}
	}

	if (args->flags & I915_EXEC_FENCE_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			err = out_fence_fd;
			goto err_exec_fence;
		}
	}

	err = eb_create(&eb);
	if (err)
		goto err_out_fence;

	GEM_BUG_ON(!eb.lut_size);

	err = eb_select_context(&eb);
	if (unlikely(err))
		goto err_destroy;

	err = eb_pin_engine(&eb, file, args);
	if (unlikely(err))
		goto err_context;

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto err_engine;

	err = eb_relocate(&eb);
	if (err) {
		/*
		 * If the user expects the execobject.offset and
		 * reloc.presumed_offset to be an exact match,
		 * as for using NO_RELOC, then we cannot update
		 * the execobject.offset until we have completed
		 * relocation.
		 */
		args->flags &= ~__EXEC_HAS_RELOC;
		goto err_vma;
	}

	if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) {
		drm_dbg(&i915->drm,
			"Attempting to use self-modifying batch buffer\n");
		err = -EINVAL;
		goto err_vma;
	}
	if (eb.batch_start_offset > eb.batch->size ||
	    eb.batch_len > eb.batch->size - eb.batch_start_offset) {
		drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
		err = -EINVAL;
		goto err_vma;
	}

	if (eb.batch_len == 0)
		eb.batch_len = eb.batch->size - eb.batch_start_offset;

	err = eb_parse(&eb);
	if (err)
		goto err_vma;

	/*
	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again.
	 */
	if (eb.batch_flags & I915_DISPATCH_SECURE) {
		struct i915_vma *vma;

		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_parse;
		}

		eb.batch = vma;
	}

	/* All GPU relocation batches must be submitted prior to the user rq */
	GEM_BUG_ON(eb.reloc_cache.rq);

	/* Allocate a request for this batch buffer nice and early. */
	eb.request = i915_request_create(eb.context);
	if (IS_ERR(eb.request)) {
		err = PTR_ERR(eb.request);
		goto err_batch_unpin;
	}

	if (in_fence) {
		err = i915_request_await_dma_fence(eb.request, in_fence);
		if (err < 0)
			goto err_request;
	}

	if (exec_fence) {
		err = i915_request_await_execution(eb.request, exec_fence,
						   eb.engine->bond_execute);
		if (err < 0)
			goto err_request;
	}

	if (fences) {
		err = await_fence_array(&eb, fences);
		if (err)
			goto err_request;
	}

	if (out_fence_fd != -1) {
		out_fence = sync_file_create(&eb.request->fence);
		if (!out_fence) {
			err = -ENOMEM;
			goto err_request;
		}
	}

	/*
	 * Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	eb.request->batch = eb.batch;
	if (eb.batch->private)
		intel_engine_pool_mark_active(eb.batch->private, eb.request);

	trace_i915_request_queue(eb.request, eb.batch_flags);
	err = eb_submit(&eb);
err_request:
	add_to_client(eb.request, file);
	i915_request_get(eb.request);
	eb_request_add(&eb);

	if (fences)
		signal_fence_array(&eb, fences);

	if (out_fence) {
		if (err == 0) {
			fd_install(out_fence_fd, out_fence->file);
			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
			args->rsvd2 |= (u64)out_fence_fd << 32;
			out_fence_fd = -1;
		} else {
			fput(out_fence->file);
		}
	}
	i915_request_put(eb.request);

err_batch_unpin:
	if (eb.batch_flags & I915_DISPATCH_SECURE)
		i915_vma_unpin(eb.batch);
err_parse:
	if (eb.batch->private)
		intel_engine_pool_put(eb.batch->private);
err_vma:
	if (eb.exec)
		eb_release_vmas(&eb);
	if (eb.trampoline)
		i915_vma_unpin(eb.trampoline);
	mutex_unlock(&dev->struct_mutex);
err_engine:
	eb_unpin_engine(&eb);
err_context:
	i915_gem_context_put(eb.gem_context);
err_destroy:
	eb_destroy(&eb);
err_out_fence:
	if (out_fence_fd != -1)
		put_unused_fd(out_fence_fd);
err_exec_fence:
	dma_fence_put(exec_fence);
err_in_fence:
	dma_fence_put(in_fence);
	return err;
}

static size_t eb_element_size(void)
{
	return (sizeof(struct drm_i915_gem_exec_object2) +
		sizeof(struct i915_vma *) +
		sizeof(unsigned int));
}

static bool check_buffer_count(size_t count)
{
	const size_t sz = eb_element_size();

	/*
	 * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup
	 * array size (see eb_create()). Otherwise, we can accept an array as
	 * large as can be addressed (though use large arrays at your peril)!
	 */

	return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1);
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	const size_t count = args->buffer_count;
	unsigned int i;
	int err;

	if (!check_buffer_count(count)) {
		drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
		return -EINVAL;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	err = i915_gem_check_execbuffer(&exec2);
	if (err)
		return err;

	/* Copy in the exec list from userland */
	exec_list = kvmalloc_array(count, sizeof(*exec_list),
				   __GFP_NOWARN | GFP_KERNEL);
	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
	if (exec_list == NULL || exec2_list == NULL) {
		drm_dbg(&i915->drm,
			"Failed to allocate exec list for %d buffers\n",
			args->buffer_count);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -ENOMEM;
	}
	err = copy_from_user(exec_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * count);
	if (err) {
		drm_dbg(&i915->drm, "copy %d exec entries failed %d\n",
			args->buffer_count, err);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_GEN(to_i915(dev)) < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list, NULL);
	if (exec2.flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
			exec2_list[i].offset &= PIN_OFFSET_MASK;
			if (__copy_to_user(&user_exec_list[i].offset,
					   &exec2_list[i].offset,
					   sizeof(user_exec_list[i].offset)))
				break;
		}
	}

	kvfree(exec_list);
	kvfree(exec2_list);
	return err;
}

int
i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list;
	struct drm_syncobj **fences = NULL;
	const size_t count = args->buffer_count;
	int err;

	if (!check_buffer_count(count)) {
		drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
		return -EINVAL;
	}

	err = i915_gem_check_execbuffer(args);
	if (err)
		return err;

	/* Allocate an extra slot for use by the command parser */
	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
	if (exec2_list == NULL) {
		drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n",
			count);
		return -ENOMEM;
	}
	if (copy_from_user(exec2_list,
			   u64_to_user_ptr(args->buffers_ptr),
			   sizeof(*exec2_list) * count)) {
		drm_dbg(&i915->drm, "copy %zd exec entries failed\n", count);
		kvfree(exec2_list);
		return -EFAULT;
	}

	if (args->flags & I915_EXEC_FENCE_ARRAY) {
		fences = get_fence_array(args, file);
		if (IS_ERR(fences)) {
			kvfree(exec2_list);
			return PTR_ERR(fences);
		}
	}

	err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences);

	/*
	 * Now that we have begun execution of the batchbuffer, we ignore
	 * any new error after this point. Also given that we have already
	 * updated the associated relocations, we try to write out the current
	 * object locations irrespective of any error.
	 */
	if (args->flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);
		unsigned int i;

		/* Copy the new buffer offsets back to the user's exec list. */
		/*
		 * Note: count * sizeof(*user_exec_list) does not overflow,
		 * because we checked 'count' in check_buffer_count().
		 *
		 * And this range already got effectively checked earlier
		 * when we did the "copy_from_user()" above.
		 */
		if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list)))
			goto end;

		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
			unsafe_put_user(exec2_list[i].offset,
					&user_exec_list[i].offset,
					end_user);
		}
end_user:
		user_access_end();
end:;
	}

	args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
	put_fence_array(args, fences);
	kvfree(exec2_list);
	return err;
}