/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008,2010 Intel Corporation
 */

#include <linux/intel-iommu.h>
#include <linux/dma-resv.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_syncobj.h>
#include <drm/i915_drm.h>

#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pool.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_ioctls.h"
#include "i915_trace.h"

enum {
	FORCE_CPU_RELOC = 1,
	FORCE_GTT_RELOC,
	FORCE_GPU_RELOC,
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
};

#define __EXEC_OBJECT_HAS_REF		BIT(31)
#define __EXEC_OBJECT_HAS_PIN		BIT(30)
#define __EXEC_OBJECT_HAS_FENCE		BIT(29)
#define __EXEC_OBJECT_NEEDS_MAP		BIT(28)
#define __EXEC_OBJECT_NEEDS_BIAS	BIT(27)
#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 27) /* all of the above */
#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)

#define __EXEC_HAS_RELOC	BIT(31)
#define __EXEC_VALIDATED	BIT(30)
#define __EXEC_INTERNAL_FLAGS	(~0u << 30)
#define UPDATE			PIN_OFFSET_FIXED

#define BATCH_OFFSET_BIAS (256*1024)

#define __I915_EXEC_ILLEGAL_FLAGS \
	(__I915_EXEC_UNKNOWN_FLAGS | \
	 I915_EXEC_CONSTANTS_MASK  | \
	 I915_EXEC_RESOURCE_STREAMER)

/* Catch emission of unexpected errors for CI! */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#undef EINVAL
#define EINVAL ({ \
	DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
	22; \
})
#endif

/**
 * DOC: User command execution
 *
 * Userspace submits commands to be executed on the GPU as an instruction
 * stream within a GEM object we call a batchbuffer. These instructions may
 * refer to other GEM objects containing auxiliary state such as kernels,
 * samplers, render targets and even secondary batchbuffers. Userspace does
 * not know where in the GPU memory these objects reside and so before the
 * batchbuffer is passed to the GPU for execution, those addresses in the
 * batchbuffer and auxiliary objects are updated. This is known as relocation,
 * or patching. To try to avoid having to relocate each object on the next
 * execution, userspace is told the location of those objects in this pass,
 * but this remains just a hint as the kernel may choose a new location for
 * any object in the future.
 *
 * At the level of talking to the hardware, submitting a batchbuffer for the
 * GPU to execute is to add content to a buffer from which the HW
 * command streamer is reading.
 *
 * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e.
 *    Execlists, this command is not placed on the same buffer as the
 *    remaining items.
 *
 * 2. Add a command to invalidate caches to the buffer.
 *
 * 3. Add a batchbuffer start command to the buffer; the start command is
 *    essentially a token together with the GPU address of the batchbuffer
 *    to be executed.
 *
 * 4. Add a pipeline flush to the buffer.
 *
 * 5. Add a memory write command to the buffer to record when the GPU
 *    is done executing the batchbuffer. The memory write writes the
 *    global sequence number of the request, ``i915_request::global_seqno``;
 *    the i915 driver uses the current value in the register to determine
 *    if the GPU has completed the batchbuffer.
 *
 * 6. Add a user interrupt command to the buffer. This command instructs
 *    the GPU to issue an interrupt when the command, pipeline flush and
 *    memory write are completed.
 *
 * 7. Inform the hardware of the additional commands added to the buffer
 *    (by updating the tail pointer).
 *
 * Processing an execbuf ioctl is conceptually split up into a few phases.
 *
 * 1. Validation - Ensure all the pointers, handles and flags are valid.
 * 2. Reservation - Assign GPU address space for every object
 * 3. Relocation - Update any addresses to point to the final locations
 * 4. Serialisation - Order the request with respect to its dependencies
 * 5. Construction - Construct a request to execute the batchbuffer
 * 6. Submission (at some point in the future execution)
 *
 * Reserving resources for the execbuf is the most complicated phase. We
 * neither want to have to migrate the object in the address space, nor do
 * we want to have to update any relocations pointing to this object. Ideally,
 * we want to leave the object where it is and for all the existing relocations
 * to match. If the object is given a new address, or if userspace thinks the
 * object is elsewhere, we have to parse all the relocation entries and update
 * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
 * all the target addresses in all of its objects match the value in the
 * relocation entries and that they all match the presumed offsets given by the
 * list of execbuffer objects. Using this knowledge, we know that if we haven't
 * moved any buffers, all the relocation entries are valid and we can skip
 * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
 * hang.) The requirements for using I915_EXEC_NO_RELOC are:
 *
 *      The addresses written in the objects must match the corresponding
 *      reloc.presumed_offset which in turn must match the corresponding
 *      execobject.offset.
 *
 *      Any render targets written to in the batch must be flagged with
 *      EXEC_OBJECT_WRITE.
 *
 *      To avoid stalling, execobject.offset should match the current
 *      address of that object within the active context.
 *
 * The reservation is done in multiple phases. First we try and keep any
 * object already bound in its current location - so long as it meets the
 * constraints imposed by the new execbuffer. Any object left unbound after the
 * first pass is then fitted into any available idle space. If an object does
 * not fit, all objects are removed from the reservation and the process rerun
 * after sorting the objects into a priority order (more difficult to fit
 * objects are tried first). Failing that, the entire VM is cleared and we try
 * to fit the execbuf one last time before concluding that it simply will not
 * fit.
 *
 * A small complication to all of this is that we allow userspace not only to
 * specify an alignment and a size for the object in the address space, but
 * we also allow userspace to specify the exact offset. Such objects are
 * simpler to place (the location is known a priori); all we have to do is make
 * sure the space is available.
 *
 * Once all the objects are in place, patching up the buried pointers to point
 * to the final locations is a fairly simple job of walking over the relocation
 * entry arrays, looking up the right address and rewriting the value into
 * the object. Simple! ... The relocation entries are stored in user memory
 * and so to access them we have to copy them into a local buffer. That copy
 * has to avoid taking any pagefaults as they may lead back to a GEM object
 * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
 * the relocation into multiple passes. First we try to do everything within an
 * atomic context (avoid the pagefaults) which requires that we never wait. If
 * we detect that we may wait, or if we need to fault, then we have to fallback
 * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
 * bells yet?) Dropping the mutex means that we lose all the state we have
 * built up so far for the execbuf and we must reset any global data. However,
 * we do leave the objects pinned in their final locations - which is a
 * potential issue for concurrent execbufs. Once we have left the mutex, we can
 * allocate and copy all the relocation entries into a large array at our
 * leisure, reacquire the mutex, reclaim all the objects and other state and
 * then proceed to update any incorrect addresses with the objects.
 *
 * As we process the relocation entries, we maintain a record of whether the
 * object is being written to. Using NO_RELOC, we expect userspace to provide
 * this information instead. We also check whether we can skip the relocation
 * by comparing the expected value inside the relocation entry with the target's
 * final address. If they differ, we have to map the current object and rewrite
 * the 4 or 8 byte pointer within.
 *
 * Serialising an execbuf is quite simple according to the rules of the GEM
 * ABI. Execution within each context is ordered by the order of submission.
 * Writes to any GEM object are in order of submission and are exclusive. Reads
 * from a GEM object are unordered with respect to other reads, but ordered by
 * writes. A write submitted after a read cannot occur before the read, and
 * similarly any read submitted after a write cannot occur before the write.
 * Writes are ordered between engines such that only one write occurs at any
 * time (completing any reads beforehand) - using semaphores where available
 * and CPU serialisation otherwise. Other GEM accesses obey the same rules, any
 * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
 * reads before starting, and any read (either using set-domain or pread) must
 * flush all GPU writes before starting. (Note we only employ a barrier before,
 * we currently rely on userspace not concurrently starting a new execution
 * whilst reading or writing to an object. This may be an advantage or not
 * depending on how much you trust userspace not to shoot themselves in the
 * foot.) Serialisation may just result in the request being inserted into
 * a DAG awaiting its turn, but most simple is to wait on the CPU until
 * all dependencies are resolved.
 *
 * After all of that, it is just a matter of closing the request and handing
 * it to the hardware (well, leaving it in a queue to be executed). However,
 * we also offer the ability for batchbuffers to be run with elevated
 * privileges so that they access otherwise hidden registers. (Used to adjust
 * L3 cache etc.) Before any batch is given extra privileges we first must
 * check that it contains no nefarious instructions, we check that each
 * instruction is from our whitelist and all registers are also from an
 * allowed list. We first copy the user's batchbuffer to a shadow (so that
 * the user doesn't have access to it, either by the CPU or GPU as we scan
 * it) and then parse each instruction. If everything is ok, we set a flag
 * telling the hardware to run the batchbuffer in trusted mode, otherwise
 * the ioctl is rejected.
 */
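
/*
 * For illustration only, not used by the driver: a minimal sketch of a
 * userspace submission honouring the I915_EXEC_NO_RELOC rules above. The
 * structs come from the uapi in <drm/i915_drm.h>; fd, the handles and the
 * presumed offsets are assumed to have been set up elsewhere.
 *
 *	struct drm_i915_gem_exec_object2 obj[2] = {};
 *	struct drm_i915_gem_execbuffer2 execbuf = {};
 *
 *	obj[0].handle = target_handle;
 *	obj[0].offset = presumed_target_offset; // must match the batch
 *	obj[1].handle = batch_handle;           // batch is last by default
 *	obj[1].relocation_count = nreloc;
 *	obj[1].relocs_ptr = (uintptr_t)relocs;  // presumed_offset filled in
 *
 *	execbuf.buffers_ptr = (uintptr_t)obj;
 *	execbuf.buffer_count = 2;
 *	execbuf.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 */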

struct i915_execbuffer {
	struct drm_i915_private *i915; /** i915 backpointer */
	struct drm_file *file; /** per-file lookup tables and limits */
	struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
	struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
	struct i915_vma **vma;
	unsigned int *flags;

	struct intel_engine_cs *engine; /** engine to queue the request to */
	struct intel_context *context; /* logical state for the request */
	struct i915_gem_context *gem_context; /** caller's context */

	struct i915_request *request; /** our request to build */
	struct i915_vma *batch; /** identity of the batch obj/vma */

	/** actual size of execobj[] as we may extend it for the cmdparser */
	unsigned int buffer_count;

	/** list of vma not yet bound during reservation phase */
	struct list_head unbound;

	/** list of vma that have execobj.relocation_count */
	struct list_head relocs;

	/**
	 * Track the most recently used object for relocations, as we
	 * frequently have to perform multiple relocations within the same
	 * obj/page
	 */
	struct reloc_cache {
		struct drm_mm_node node; /** temporary GTT binding */
		unsigned long vaddr; /** Current kmap address */
		unsigned long page; /** Currently mapped page index */
		unsigned int gen; /** Cached value of INTEL_GEN */
		bool use_64bit_reloc : 1;
		bool has_llc : 1;
		bool has_fence : 1;
		bool needs_unfenced : 1;

		struct intel_context *ce;
		struct i915_request *rq;
		u32 *rq_cmd;
		unsigned int rq_size;
	} reloc_cache;

	u64 invalid_flags; /** Set of execobj.flags that are invalid */
	u32 context_flags; /** Set of execobj.flags to insert from the ctx */

	u32 batch_start_offset; /** Location within object of batch */
	u32 batch_len; /** Length of batch within object */
	u32 batch_flags; /** Flags composed for emit_bb_start() */

	/**
	 * Indicate either the size of the hashtable used to resolve
	 * relocation handles, or if negative that we are using a direct
	 * index into the execobj[].
	 */
	int lut_size;
	struct hlist_head *buckets; /** ht for relocation handles */
};

#define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags])

/*
 * Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
#define GEN8_HIGH_ADDRESS_BIT 47
static inline u64 gen8_canonical_addr(u64 address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline u64 gen8_noncanonical_addr(u64 address)
{
	return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
}
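
/*
 * Worked example (illustrative): 0x0000800000000000 has bit 47 set, so
 * gen8_canonical_addr() sign-extends it to 0xffff800000000000, while
 * gen8_noncanonical_addr() masks the same value back to
 * 0x0000800000000000 for use inside drm_mm.
 */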

static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
	return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len;
}

static int eb_create(struct i915_execbuffer *eb)
{
	if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
		unsigned int size = 1 + ilog2(eb->buffer_count);

		/*
		 * Without a 1:1 association between relocation handles and
		 * the execobject[] index, we instead create a hashtable.
		 * We size it dynamically based on available memory, starting
		 * first with a 1:1 associative hash and scaling back until
		 * the allocation succeeds.
		 *
		 * Later on we use a positive lut_size to indicate we are
		 * using this hashtable, and a negative value to indicate a
		 * direct lookup.
		 */
		do {
			gfp_t flags;

			/*
			 * While we can still reduce the allocation size, don't
			 * raise a warning and allow the allocation to fail.
			 * On the last pass though, we want to try as hard
			 * as possible to perform the allocation and warn
			 * if it fails.
			 */
			flags = GFP_KERNEL;
			if (size > 1)
				flags |= __GFP_NORETRY | __GFP_NOWARN;

			eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
					      flags);
			if (eb->buckets)
				break;
		} while (--size);

		if (unlikely(!size))
			return -ENOMEM;

		eb->lut_size = size;
	} else {
		eb->lut_size = -eb->buffer_count;
	}

	return 0;
}
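
/*
 * Sizing example (illustrative): for an execbuf with 4096 objects, size
 * starts at 1 + ilog2(4096) = 13, i.e. an 8192-bucket table; each failed
 * kzalloc() halves the table until the allocation succeeds or size
 * reaches zero.
 */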

static bool
eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
		 const struct i915_vma *vma,
		 unsigned int flags)
{
	if (vma->node.size < entry->pad_to_size)
		return true;

	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
		return true;

	if (flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	if (flags & __EXEC_OBJECT_NEEDS_MAP &&
	    !i915_vma_is_map_and_fenceable(vma))
		return true;

	return false;
}

static inline bool
eb_pin_vma(struct i915_execbuffer *eb,
	   const struct drm_i915_gem_exec_object2 *entry,
	   struct i915_vma *vma)
{
	unsigned int exec_flags = *vma->exec_flags;
	u64 pin_flags;

	if (vma->node.size)
		pin_flags = vma->node.start;
	else
		pin_flags = entry->offset & PIN_OFFSET_MASK;

	pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_GTT))
		pin_flags |= PIN_GLOBAL;

	if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
		return false;

	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
		if (unlikely(i915_vma_pin_fence(vma))) {
			i915_vma_unpin(vma);
			return false;
		}

		if (vma->fence)
			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
	return !eb_vma_misplaced(entry, vma, exec_flags);
}

static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
{
	GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));

	if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
		__i915_vma_unpin_fence(vma);

	__i915_vma_unpin(vma);
}

static inline void
eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags)
{
	if (!(*flags & __EXEC_OBJECT_HAS_PIN))
		return;

	__eb_unreserve_vma(vma, *flags);
	*flags &= ~__EXEC_OBJECT_RESERVED;
}

static int
eb_validate_vma(struct i915_execbuffer *eb,
		struct drm_i915_gem_exec_object2 *entry,
		struct i915_vma *vma)
{
	if (unlikely(entry->flags & eb->invalid_flags))
		return -EINVAL;

	if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
		return -EINVAL;

	/*
	 * Offset can be used as input (EXEC_OBJECT_PINNED), reject
	 * any non-page-aligned or non-canonical addresses.
	 */
	if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
		     entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
		return -EINVAL;

	/* pad_to_size was once a reserved field, so sanitize it */
	if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
		if (unlikely(offset_in_page(entry->pad_to_size)))
			return -EINVAL;
	} else {
		entry->pad_to_size = 0;
	}

	if (unlikely(vma->exec_flags)) {
		DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n",
			  entry->handle, (int)(entry - eb->exec));
		return -EINVAL;
	}

	/*
	 * From drm_mm perspective address space is continuous,
	 * so from this point we're always using non-canonical
	 * form internally.
	 */
	entry->offset = gen8_noncanonical_addr(entry->offset);

	if (!eb->reloc_cache.has_fence) {
		entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
	} else {
		if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
		     eb->reloc_cache.needs_unfenced) &&
		    i915_gem_object_is_tiled(vma->obj))
			entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
	}

	if (!(entry->flags & EXEC_OBJECT_PINNED))
		entry->flags |= eb->context_flags;

	return 0;
}

static int
eb_add_vma(struct i915_execbuffer *eb,
	   unsigned int i, unsigned batch_idx,
	   struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
	int err;

	GEM_BUG_ON(i915_vma_is_closed(vma));

	if (!(eb->args->flags & __EXEC_VALIDATED)) {
		err = eb_validate_vma(eb, entry, vma);
		if (unlikely(err))
			return err;
	}

	if (eb->lut_size > 0) {
		vma->exec_handle = entry->handle;
		hlist_add_head(&vma->exec_node,
			       &eb->buckets[hash_32(entry->handle,
						    eb->lut_size)]);
	}

	if (entry->relocation_count)
		list_add_tail(&vma->reloc_link, &eb->relocs);

	/*
	 * Stash a pointer from the vma to execobj, so we can query its flags,
	 * size, alignment etc as provided by the user. Also we stash a pointer
	 * to the vma inside the execobj so that we can use a direct lookup
	 * to find the right target VMA when doing relocations.
	 */
	eb->vma[i] = vma;
	eb->flags[i] = entry->flags;
	vma->exec_flags = &eb->flags[i];

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if (i == batch_idx) {
		if (entry->relocation_count &&
		    !(eb->flags[i] & EXEC_OBJECT_PINNED))
			eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
		if (eb->reloc_cache.has_fence)
			eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;

		eb->batch = vma;
	}

	err = 0;
	if (eb_pin_vma(eb, entry, vma)) {
		if (entry->offset != vma->node.start) {
			entry->offset = vma->node.start | UPDATE;
			eb->args->flags |= __EXEC_HAS_RELOC;
		}
	} else {
		eb_unreserve_vma(vma, vma->exec_flags);

		list_add_tail(&vma->exec_link, &eb->unbound);
		if (drm_mm_node_allocated(&vma->node))
			err = i915_vma_unbind(vma);
		if (unlikely(err))
			vma->exec_flags = NULL;
	}
	return err;
}

static inline int use_cpu_reloc(const struct reloc_cache *cache,
				const struct drm_i915_gem_object *obj)
{
	if (!i915_gem_object_has_struct_page(obj))
		return false;

	if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
		return true;

	if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
		return false;

	return (cache->has_llc ||
		obj->cache_dirty ||
		obj->cache_level != I915_CACHE_NONE);
}

static int eb_reserve_vma(const struct i915_execbuffer *eb,
			  struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
	unsigned int exec_flags = *vma->exec_flags;
	u64 pin_flags;
	int err;

	pin_flags = PIN_USER | PIN_NONBLOCK;
	if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
		pin_flags |= PIN_GLOBAL;

	/*
	 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
	 * limit address to the first 4GBs for unflagged objects.
	 */
	if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
		pin_flags |= PIN_ZONE_4G;

	if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
		pin_flags |= PIN_MAPPABLE;

	if (exec_flags & EXEC_OBJECT_PINNED) {
		pin_flags |= entry->offset | PIN_OFFSET_FIXED;
		pin_flags &= ~PIN_NONBLOCK; /* force overlapping checks */
	} else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) {
		pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
	}

	err = i915_vma_pin(vma,
			   entry->pad_to_size, entry->alignment,
			   pin_flags);
	if (err)
		return err;

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start | UPDATE;
		eb->args->flags |= __EXEC_HAS_RELOC;
	}

	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
		err = i915_vma_pin_fence(vma);
		if (unlikely(err)) {
			i915_vma_unpin(vma);
			return err;
		}

		if (vma->fence)
			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
	GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags));

	return 0;
}

static int eb_reserve(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	struct list_head last;
	struct i915_vma *vma;
	unsigned int i, pass;
	int err;

	/*
	 * Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */

	pass = 0;
	err = 0;
	do {
		list_for_each_entry(vma, &eb->unbound, exec_link) {
			err = eb_reserve_vma(eb, vma);
			if (err)
				break;
		}
		if (err != -ENOSPC)
			return err;

		/* Resort *all* the objects into priority order */
		INIT_LIST_HEAD(&eb->unbound);
		INIT_LIST_HEAD(&last);
		for (i = 0; i < count; i++) {
			unsigned int flags = eb->flags[i];
			struct i915_vma *vma = eb->vma[i];

			if (flags & EXEC_OBJECT_PINNED &&
			    flags & __EXEC_OBJECT_HAS_PIN)
				continue;

			eb_unreserve_vma(vma, &eb->flags[i]);

			if (flags & EXEC_OBJECT_PINNED)
				/* Pinned must have their slot */
				list_add(&vma->exec_link, &eb->unbound);
			else if (flags & __EXEC_OBJECT_NEEDS_MAP)
				/* Mappable objects require the lowest 256MiB (aperture) */
				list_add_tail(&vma->exec_link, &eb->unbound);
			else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
				/* Prioritise 4GiB region for restricted bo */
				list_add(&vma->exec_link, &last);
			else
				list_add_tail(&vma->exec_link, &last);
		}
		list_splice_tail(&last, &eb->unbound);

		switch (pass++) {
		case 0:
			break;

		case 1:
			/* Too fragmented, unbind everything and retry */
			err = i915_gem_evict_vm(eb->context->vm);
			if (err)
				return err;
			break;

		default:
			return -ENOSPC;
		}
	} while (1);
}
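
/*
 * In short (as implemented above): pass 0 rebinds just the unreserved
 * objects, pass 1 evicts the whole VM and tries again, and any further
 * pass fails the reservation with -ENOSPC.
 */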

static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
{
	if (eb->args->flags & I915_EXEC_BATCH_FIRST)
		return 0;
	else
		return eb->buffer_count - 1;
}
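
/*
 * E.g. with buffer_count == 3 the batch is execobj[2] by default, or
 * execobj[0] if userspace passed I915_EXEC_BATCH_FIRST.
 */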

static int eb_select_context(struct i915_execbuffer *eb)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
	if (unlikely(!ctx))
		return -ENOENT;

	eb->gem_context = ctx;
	if (ctx->vm)
		eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;

	eb->context_flags = 0;
	if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
		eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return 0;
}

static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
	struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
	struct drm_i915_gem_object *obj;
	unsigned int i, batch;
	int err;

	if (unlikely(i915_gem_context_is_banned(eb->gem_context)))
		return -EIO;

	INIT_LIST_HEAD(&eb->relocs);
	INIT_LIST_HEAD(&eb->unbound);

	batch = eb_batch_index(eb);

	mutex_lock(&eb->gem_context->mutex);
	if (unlikely(i915_gem_context_is_closed(eb->gem_context))) {
		err = -ENOENT;
		goto err_ctx;
	}

	for (i = 0; i < eb->buffer_count; i++) {
		u32 handle = eb->exec[i].handle;
		struct i915_lut_handle *lut;
		struct i915_vma *vma;

		vma = radix_tree_lookup(handles_vma, handle);
		if (likely(vma))
			goto add_vma;

		obj = i915_gem_object_lookup(eb->file, handle);
		if (unlikely(!obj)) {
			err = -ENOENT;
			goto err_vma;
		}

		vma = i915_vma_instance(obj, eb->context->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_obj;
		}

		lut = i915_lut_handle_alloc();
		if (unlikely(!lut)) {
			err = -ENOMEM;
			goto err_obj;
		}

		err = radix_tree_insert(handles_vma, handle, vma);
		if (unlikely(err)) {
			i915_lut_handle_free(lut);
			goto err_obj;
		}

		/* transfer ref to lut */
		if (!atomic_fetch_inc(&vma->open_count))
			i915_vma_reopen(vma);
		lut->handle = handle;
		lut->ctx = eb->gem_context;

		i915_gem_object_lock(obj);
		list_add(&lut->obj_link, &obj->lut_list);
		i915_gem_object_unlock(obj);

add_vma:
		err = eb_add_vma(eb, i, batch, vma);
		if (unlikely(err))
			goto err_vma;

		GEM_BUG_ON(vma != eb->vma[i]);
		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
		GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
			   eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
	}

	mutex_unlock(&eb->gem_context->mutex);

	eb->args->flags |= __EXEC_VALIDATED;
	return eb_reserve(eb);

err_obj:
	i915_gem_object_put(obj);
err_vma:
	eb->vma[i] = NULL;
err_ctx:
	mutex_unlock(&eb->gem_context->mutex);
	return err;
}

static struct i915_vma *
eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
{
	if (eb->lut_size < 0) {
		if (handle >= -eb->lut_size)
			return NULL;
		return eb->vma[handle];
	} else {
		struct hlist_head *head;
		struct i915_vma *vma;

		head = &eb->buckets[hash_32(handle, eb->lut_size)];
		hlist_for_each_entry(vma, head, exec_node) {
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void eb_release_vmas(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;

	for (i = 0; i < count; i++) {
		struct i915_vma *vma = eb->vma[i];
		unsigned int flags = eb->flags[i];

		if (!vma)
			break;

		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
		vma->exec_flags = NULL;
		eb->vma[i] = NULL;

		if (flags & __EXEC_OBJECT_HAS_PIN)
			__eb_unreserve_vma(vma, flags);

		if (flags & __EXEC_OBJECT_HAS_REF)
			i915_vma_put(vma);
	}
}

static void eb_reset_vmas(const struct i915_execbuffer *eb)
{
	eb_release_vmas(eb);
	if (eb->lut_size > 0)
		memset(eb->buckets, 0,
		       sizeof(struct hlist_head) << eb->lut_size);
}

static void eb_destroy(const struct i915_execbuffer *eb)
{
	GEM_BUG_ON(eb->reloc_cache.rq);

	if (eb->reloc_cache.ce)
		intel_context_put(eb->reloc_cache.ce);

	if (eb->lut_size > 0)
		kfree(eb->buckets);
}

static inline u64
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
		  const struct i915_vma *target)
{
	return gen8_canonical_addr((int)reloc->delta + target->node.start);
}

static void reloc_cache_init(struct reloc_cache *cache,
			     struct drm_i915_private *i915)
{
	cache->page = -1;
	cache->vaddr = 0;
	/* Must be a variable in the struct to allow GCC to unroll. */
	cache->gen = INTEL_GEN(i915);
	cache->has_llc = HAS_LLC(i915);
	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
	cache->has_fence = cache->gen < 4;
	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
	cache->node.allocated = false;
	cache->ce = NULL;
	cache->rq = NULL;
	cache->rq_size = 0;
}

static inline void *unmask_page(unsigned long p)
{
	return (void *)(uintptr_t)(p & PAGE_MASK);
}

static inline unsigned int unmask_flags(unsigned long p)
{
	return p & ~PAGE_MASK;
}

#define KMAP 0x4 /* after CLFLUSH_FLAGS */

static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
{
	struct drm_i915_private *i915 =
		container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
	return &i915->ggtt;
}

static void reloc_gpu_flush(struct reloc_cache *cache)
{
	GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
	cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;

	__i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
	i915_gem_object_unpin_map(cache->rq->batch->obj);

	intel_gt_chipset_flush(cache->rq->engine->gt);

	i915_request_add(cache->rq);
	cache->rq = NULL;
}

static void reloc_cache_reset(struct reloc_cache *cache)
{
	void *vaddr;

	if (cache->rq)
		reloc_gpu_flush(cache);

	if (!cache->vaddr)
		return;

	vaddr = unmask_page(cache->vaddr);
	if (cache->vaddr & KMAP) {
		if (cache->vaddr & CLFLUSH_AFTER)
			mb();

		kunmap_atomic(vaddr);
		i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
	} else {
		struct i915_ggtt *ggtt = cache_to_ggtt(cache);

		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
		io_mapping_unmap_atomic((void __iomem *)vaddr);

		if (cache->node.allocated) {
			ggtt->vm.clear_range(&ggtt->vm,
					     cache->node.start,
					     cache->node.size);
			drm_mm_remove_node(&cache->node);
		} else {
			i915_vma_unpin((struct i915_vma *)cache->node.mm);
		}
	}

	cache->vaddr = 0;
	cache->page = -1;
}

static void *reloc_kmap(struct drm_i915_gem_object *obj,
			struct reloc_cache *cache,
			unsigned long page)
{
	void *vaddr;

	if (cache->vaddr) {
		kunmap_atomic(unmask_page(cache->vaddr));
	} else {
		unsigned int flushes;
		int err;

		err = i915_gem_object_prepare_write(obj, &flushes);
		if (err)
			return ERR_PTR(err);

		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);

		cache->vaddr = flushes | KMAP;
		cache->node.mm = (void *)obj;
		if (flushes)
			mb();
	}

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
	cache->page = page;

	return vaddr;
}

static void *reloc_iomap(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 unsigned long page)
{
	struct i915_ggtt *ggtt = cache_to_ggtt(cache);
	unsigned long offset;
	void *vaddr;

	if (cache->vaddr) {
		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
	} else {
		struct i915_vma *vma;
		int err;

		if (i915_gem_object_is_tiled(obj))
			return ERR_PTR(-EINVAL);

		if (use_cpu_reloc(cache, obj))
			return NULL;

		i915_gem_object_lock(obj);
		err = i915_gem_object_set_to_gtt_domain(obj, true);
		i915_gem_object_unlock(obj);
		if (err)
			return ERR_PTR(err);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE |
					       PIN_NONBLOCK /* NOWARN */ |
					       PIN_NOEVICT);
		if (IS_ERR(vma)) {
			memset(&cache->node, 0, sizeof(cache->node));
			err = drm_mm_insert_node_in_range
				(&ggtt->vm.mm, &cache->node,
				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
				 0, ggtt->mappable_end,
				 DRM_MM_INSERT_LOW);
			if (err) /* no inactive aperture space, use cpu reloc */
				return NULL;
		} else {
			cache->node.start = vma->node.start;
			cache->node.mm = (void *)vma;
		}
	}

	offset = cache->node.start;
	if (cache->node.allocated) {
		ggtt->vm.insert_page(&ggtt->vm,
				     i915_gem_object_get_dma_address(obj, page),
				     offset, I915_CACHE_NONE, 0);
	} else {
		offset += page << PAGE_SHIFT;
	}

	vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
							 offset);
	cache->page = page;
	cache->vaddr = (unsigned long)vaddr;

	return vaddr;
}

static void *reloc_vaddr(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 unsigned long page)
{
	void *vaddr;

	if (cache->page == page) {
		vaddr = unmask_page(cache->vaddr);
	} else {
		vaddr = NULL;
		if ((cache->vaddr & KMAP) == 0)
			vaddr = reloc_iomap(obj, cache, page);
		if (!vaddr)
			vaddr = reloc_kmap(obj, cache, page);
	}

	return vaddr;
}

static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
{
	if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
		if (flushes & CLFLUSH_BEFORE) {
			clflushopt(addr);
			mb();
		}

		*addr = value;

		/*
		 * Writes to the same cacheline are serialised by the CPU
		 * (including clflush). On the write path, we only require
		 * that it hits memory in an orderly fashion and place
		 * mb barriers at the start and end of the relocation phase
		 * to ensure ordering of clflush with respect to the system.
		 */
		if (flushes & CLFLUSH_AFTER)
			clflushopt(addr);
	} else
		*addr = value;
}
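
/*
 * Illustrative note: flushes originates from i915_gem_object_prepare_write()
 * on the kmap path; on LLC (coherent) platforms it is typically 0 and the
 * relocation write above reduces to a plain store.
 */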

static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	i915_vma_lock(vma);

	if (obj->cache_dirty & ~obj->cache_coherent)
		i915_gem_clflush_object(obj, 0);
	obj->write_domain = 0;

	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);

	i915_vma_unlock(vma);

	return err;
}

static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
			     struct i915_vma *vma,
			     unsigned int len)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	struct intel_engine_pool_node *pool;
	struct i915_request *rq;
	struct i915_vma *batch;
	u32 *cmd;
	int err;

	pool = intel_engine_get_pool(eb->engine, PAGE_SIZE);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	cmd = i915_gem_object_pin_map(pool->obj,
				      cache->has_llc ?
				      I915_MAP_FORCE_WB :
				      I915_MAP_FORCE_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto out_pool;
	}

	batch = i915_vma_instance(pool->obj, vma->vm, NULL);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_unmap;
	}

	err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
	if (err)
		goto err_unmap;

	rq = intel_context_create_request(cache->ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = intel_engine_pool_mark_active(pool, rq);
	if (err)
		goto err_request;

	err = reloc_move_to_gpu(rq, vma);
	if (err)
		goto err_request;

	err = eb->engine->emit_bb_start(rq,
					batch->node.start, PAGE_SIZE,
					cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
	if (err)
		goto skip_request;

	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto skip_request;

	rq->batch = batch;
	i915_vma_unpin(batch);

	cache->rq = rq;
	cache->rq_cmd = cmd;
	cache->rq_size = 0;

	/* Return with batch mapping (cmd) still pinned */
	goto out_pool;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(batch);
err_unmap:
	i915_gem_object_unpin_map(pool->obj);
out_pool:
	intel_engine_pool_put(pool);
	return err;
}

static u32 *reloc_gpu(struct i915_execbuffer *eb,
		      struct i915_vma *vma,
		      unsigned int len)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	u32 *cmd;

	if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1))
		reloc_gpu_flush(cache);

	if (unlikely(!cache->rq)) {
		int err;

		/* If we need to copy for the cmdparser, we will stall anyway */
		if (eb_use_cmdparser(eb))
			return ERR_PTR(-EWOULDBLOCK);

		if (!intel_engine_can_store_dword(eb->engine))
			return ERR_PTR(-ENODEV);

		if (!cache->ce) {
			struct intel_context *ce;

			/*
			 * The CS pre-parser can pre-fetch commands across
			 * memory sync points and starting gen12 it is able to
			 * pre-fetch across BB_START and BB_END boundaries
			 * (within the same context). We therefore use a
			 * separate context on gen12+ to guarantee that the
			 * reloc writes land before the parser gets to the
			 * target memory location.
			 */
			if (cache->gen >= 12)
				ce = intel_context_create(eb->context->gem_context,
							  eb->engine);
			else
				ce = intel_context_get(eb->context);
			if (IS_ERR(ce))
				return ERR_CAST(ce);

			cache->ce = ce;
		}

		err = __reloc_gpu_alloc(eb, vma, len);
		if (unlikely(err))
			return ERR_PTR(err);
	}

	cmd = cache->rq_cmd + cache->rq_size;
	cache->rq_size += len;

	return cmd;
}
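
/*
 * Illustrative note: the widest relocation emitted by relocate_entry()
 * below is len == 8 dwords (two MI_STORE_DWORD_IMM_GEN4 packets for a
 * qword-misaligned 64-bit write), and the flush check above always keeps
 * one extra dword free for the closing MI_BATCH_BUFFER_END.
 */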

static u64
relocate_entry(struct i915_vma *vma,
	       const struct drm_i915_gem_relocation_entry *reloc,
	       struct i915_execbuffer *eb,
	       const struct i915_vma *target)
{
	u64 offset = reloc->offset;
	u64 target_offset = relocation_target(reloc, target);
	bool wide = eb->reloc_cache.use_64bit_reloc;
	void *vaddr;

	if (!eb->reloc_cache.vaddr &&
	    (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
	     !dma_resv_test_signaled_rcu(vma->resv, true))) {
		const unsigned int gen = eb->reloc_cache.gen;
		unsigned int len;
		u32 *batch;
		u64 addr;

		if (wide)
			len = offset & 7 ? 8 : 5;
		else if (gen >= 4)
			len = 4;
		else
			len = 3;

		batch = reloc_gpu(eb, vma, len);
		if (IS_ERR(batch))
			goto repeat;

		addr = gen8_canonical_addr(vma->node.start + offset);
		if (wide) {
			if (offset & 7) {
				*batch++ = MI_STORE_DWORD_IMM_GEN4;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = lower_32_bits(target_offset);

				addr = gen8_canonical_addr(addr + 4);

				*batch++ = MI_STORE_DWORD_IMM_GEN4;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = upper_32_bits(target_offset);
			} else {
				*batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = lower_32_bits(target_offset);
				*batch++ = upper_32_bits(target_offset);
			}
		} else if (gen >= 6) {
			*batch++ = MI_STORE_DWORD_IMM_GEN4;
			*batch++ = 0;
			*batch++ = addr;
			*batch++ = target_offset;
		} else if (gen >= 4) {
			*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
			*batch++ = 0;
			*batch++ = addr;
			*batch++ = target_offset;
		} else {
			*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*batch++ = addr;
			*batch++ = target_offset;
		}

		goto out;
	}

repeat:
	vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	clflush_write32(vaddr + offset_in_page(offset),
			lower_32_bits(target_offset),
			eb->reloc_cache.vaddr);

	if (wide) {
		offset += sizeof(u32);
		target_offset >>= 32;
		wide = false;
		goto repeat;
	}

out:
	return target->node.start | UPDATE;
}

static u64
eb_relocate_entry(struct i915_execbuffer *eb,
		  struct i915_vma *vma,
		  const struct drm_i915_gem_relocation_entry *reloc)
{
	struct i915_vma *target;
	int err;

	/* we already hold a reference to all valid objects */
	target = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(!target))
		return -ENOENT;

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	if (reloc->write_domain) {
		*target->exec_flags |= EXEC_OBJECT_WRITE;

		/*
		 * Sandybridge PPGTT errata: We need a global gtt mapping
		 * for MI and pipe_control writes because the gpu doesn't
		 * properly redirect them through the ppgtt for non_secure
		 * batchbuffers.
		 */
		if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
		    IS_GEN(eb->i915, 6)) {
			err = i915_vma_bind(target, target->obj->cache_level,
					    PIN_GLOBAL);
			if (WARN_ONCE(err,
				      "Unexpected failure to bind target VMA!"))
				return err;
		}
	}

	/*
	 * If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (!DBG_FORCE_RELOC &&
	    gen8_canonical_addr(target->node.start) == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		     vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "target %d offset %d size %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset,
			  (int)vma->size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "target %d offset %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset);
		return -EINVAL;
	}

	/*
	 * If we write into the object, we need to force the synchronisation
	 * barrier, either with an asynchronous clflush or if we executed the
	 * patching using the GPU (though that should be serialised by the
	 * timeline). To be completely sure, and since we are required to
	 * do relocations we are already stalling, disable the user's opt
	 * out of our synchronisation.
	 */
	*vma->exec_flags &= ~EXEC_OBJECT_ASYNC;

	/* and update the user's relocation entry */
	return relocate_entry(vma, reloc, eb, target);
}

static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *urelocs;
	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
	unsigned int remain;

	urelocs = u64_to_user_ptr(entry->relocs_ptr);
	remain = entry->relocation_count;
	if (unlikely(remain > N_RELOC(ULONG_MAX)))
		return -EINVAL;

	/*
	 * We must check that the entire relocation array is safe
	 * to read. However, if the array is not writable the user loses
	 * the updated relocation values.
	 */
	if (unlikely(!access_ok(urelocs, remain*sizeof(*urelocs))))
		return -EFAULT;

	do {
		struct drm_i915_gem_relocation_entry *r = stack;
		unsigned int count =
			min_t(unsigned int, remain, ARRAY_SIZE(stack));
		unsigned int copied;

		/*
		 * This is the fast path and we cannot handle a pagefault
		 * whilst holding the struct mutex lest the user pass in the
		 * relocations contained within a mmaped bo. In such a case
		 * the page fault handler would call i915_gem_fault() and we
		 * would try to acquire the struct mutex again. Obviously
		 * this is bad and so lockdep complains vehemently.
		 */
		pagefault_disable();
		copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
		pagefault_enable();
		if (unlikely(copied)) {
			remain = -EFAULT;
			goto out;
		}

		remain -= count;
		do {
			u64 offset = eb_relocate_entry(eb, vma, r);

			if (likely(offset == 0)) {
			} else if ((s64)offset < 0) {
				remain = (int)offset;
				goto out;
			} else {
				/*
				 * Note that reporting an error now
				 * leaves everything in an inconsistent
				 * state as we have *already* changed
				 * the relocation value inside the
				 * object. As we have not changed the
				 * reloc.presumed_offset and will not
				 * change the execobject.offset, on a
				 * future call we may not rewrite the
				 * value inside the object, leaving it
				 * dangling and causing a GPU hang. Unless
				 * userspace dynamically rebuilds the
				 * relocations on each execbuf rather than
				 * presuming a static tree.
				 *
				 * We did previously check if the relocations
				 * were writable (access_ok), an error now
				 * would be a strange race with mprotect,
				 * having already demonstrated that we
				 * can read from this userspace address.
				 */
				offset = gen8_canonical_addr(offset & ~UPDATE);
				if (unlikely(__put_user(offset, &urelocs[r-stack].presumed_offset))) {
					remain = -EFAULT;
					goto out;
				}
			}
		} while (r++, --count);
		urelocs += ARRAY_SIZE(stack);
	} while (remain);
out:
	reloc_cache_reset(&eb->reloc_cache);
	return remain;
}
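
/*
 * Illustrative sizing: sizeof(struct drm_i915_gem_relocation_entry) is 32
 * bytes, so N_RELOC(512) == 16 and the fast path above copies and applies
 * the user's relocations in batches of 16 stack-resident entries.
 */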

static int
eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma)
{
	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
	struct drm_i915_gem_relocation_entry *relocs =
		u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
	unsigned int i;
	int err;

	for (i = 0; i < entry->relocation_count; i++) {
		u64 offset = eb_relocate_entry(eb, vma, &relocs[i]);

		if ((s64)offset < 0) {
			err = (int)offset;
			goto err;
		}
	}
	err = 0;
err:
	reloc_cache_reset(&eb->reloc_cache);
	return err;
}

static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
{
	const char __user *addr, *end;
	unsigned long size;
	char __maybe_unused c;

	size = entry->relocation_count;
	if (size == 0)
		return 0;

	if (size > N_RELOC(ULONG_MAX))
		return -EINVAL;

	addr = u64_to_user_ptr(entry->relocs_ptr);
	size *= sizeof(struct drm_i915_gem_relocation_entry);
	if (!access_ok(addr, size))
		return -EFAULT;

	end = addr + size;
	for (; addr < end; addr += PAGE_SIZE) {
		int err = __get_user(c, addr);
		if (err)
			return err;
	}
	return __get_user(c, end - 1);
}

static int eb_copy_relocations(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		const unsigned int nreloc = eb->exec[i].relocation_count;
		struct drm_i915_gem_relocation_entry __user *urelocs;
		struct drm_i915_gem_relocation_entry *relocs;
		unsigned long size;
		unsigned long copied;

		if (nreloc == 0)
			continue;

		err = check_relocations(&eb->exec[i]);
		if (err)
			goto err;

		urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
		size = nreloc * sizeof(*relocs);

		relocs = kvmalloc_array(size, 1, GFP_KERNEL);
		if (!relocs) {
			err = -ENOMEM;
			goto err;
		}

		/* copy_from_user is limited to < 4GiB */
		copied = 0;
		do {
			unsigned int len =
				min_t(u64, BIT_ULL(31), size - copied);

			if (__copy_from_user((char *)relocs + copied,
					     (char __user *)urelocs + copied,
					     len)) {
end_user:
				user_access_end();
end:
				kvfree(relocs);
				err = -EFAULT;
				goto err;
			}

			copied += len;
		} while (copied < size);

		/*
		 * As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		if (!user_access_begin(urelocs, size))
			goto end;

		for (copied = 0; copied < nreloc; copied++)
			unsafe_put_user(-1,
					&urelocs[copied].presumed_offset,
					end_user);
		user_access_end();

		eb->exec[i].relocs_ptr = (uintptr_t)relocs;
	}

	return 0;

err:
	while (i--) {
		struct drm_i915_gem_relocation_entry *relocs =
			u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
		if (eb->exec[i].relocation_count)
			kvfree(relocs);
	}
	return err;
}

static int eb_prefault_relocations(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;

	if (unlikely(i915_modparams.prefault_disable))
		return 0;

	for (i = 0; i < count; i++) {
		int err;

		err = check_relocations(&eb->exec[i]);
		if (err)
			return err;
	}

	return 0;
}

static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
{
	struct drm_device *dev = &eb->i915->drm;
	bool have_copy = false;
	struct i915_vma *vma;
	int err = 0;

repeat:
	if (signal_pending(current)) {
		err = -ERESTARTSYS;
		goto out;
	}

	/* We may process another execbuffer during the unlock... */
	eb_reset_vmas(eb);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * We take 3 passes through the slowpath.
	 *
	 * 1 - we try to just prefault all the user relocation entries and
	 * then attempt to reuse the atomic pagefault disabled fast path again.
	 *
	 * 2 - we copy the user entries to a local buffer here outside of the
	 * lock and allow ourselves to wait upon any rendering before
	 * relocations
	 *
	 * 3 - we already have a local copy of the relocation entries, but
	 * were interrupted (EAGAIN) whilst waiting for the objects, try again.
	 */
	if (!err) {
		err = eb_prefault_relocations(eb);
	} else if (!have_copy) {
		err = eb_copy_relocations(eb);
		have_copy = err == 0;
	} else {
		cond_resched();
		err = 0;
	}
	if (err) {
		mutex_lock(&dev->struct_mutex);
		goto out;
	}

	/* A frequent cause of EAGAIN is currently unavailable client pages */
	flush_workqueue(eb->i915->mm.userptr_wq);

	err = i915_mutex_lock_interruptible(dev);
	if (err) {
		mutex_lock(&dev->struct_mutex);
		goto out;
	}

	/* reacquire the objects */
	err = eb_lookup_vmas(eb);
	if (err)
		goto err;

	GEM_BUG_ON(!eb->batch);

	list_for_each_entry(vma, &eb->relocs, reloc_link) {
		if (!have_copy) {
			pagefault_disable();
			err = eb_relocate_vma(eb, vma);
			pagefault_enable();
			if (err)
				goto repeat;
		} else {
			err = eb_relocate_vma_slow(eb, vma);
			if (err)
				goto err;
		}
	}

	/*
	 * Leave the user relocations as they are, this is the painfully slow
	 * path, and we want to avoid the complication of dropping the lock
	 * whilst having buffers reserved in the aperture and so causing
	 * spurious ENOSPC for random operations.
	 */

err:
	if (err == -EAGAIN)
		goto repeat;

out:
	if (have_copy) {
		const unsigned int count = eb->buffer_count;
		unsigned int i;

		for (i = 0; i < count; i++) {
			const struct drm_i915_gem_exec_object2 *entry =
				&eb->exec[i];
			struct drm_i915_gem_relocation_entry *relocs;

			if (!entry->relocation_count)
				continue;

			relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
			kvfree(relocs);
		}
	}

	return err;
}

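/*
 * Fast path: look up all objects and apply their relocations in a
 * single pass; any failure (e.g. a relocation entry that would fault)
 * drops us into the multi-pass eb_relocate_slow() above.
 */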
static int eb_relocate(struct i915_execbuffer *eb)
{
	if (eb_lookup_vmas(eb))
		goto slow;

	/* The objects are in their final locations, apply the relocations. */
	if (eb->args->flags & __EXEC_HAS_RELOC) {
		struct i915_vma *vma;

		list_for_each_entry(vma, &eb->relocs, reloc_link) {
			if (eb_relocate_vma(eb, vma))
				goto slow;
		}
	}

	return 0;

slow:
	return eb_relocate_slow(eb);
}

static int eb_move_to_gpu(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	struct ww_acquire_ctx acquire;
	unsigned int i;
	int err = 0;

	ww_acquire_init(&acquire, &reservation_ww_class);

	for (i = 0; i < count; i++) {
		struct i915_vma *vma = eb->vma[i];

		err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
		if (!err)
			continue;

		GEM_BUG_ON(err == -EALREADY); /* No duplicate vma */

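		/*
		 * -EDEADLK means we hit a contended ww_mutex: back off by
		 * releasing every lock taken so far, rotate the contended
		 * vma into slot 0 and reacquire its lock with the blocking
		 * lock-slow variant, then resume locking the remainder.
		 */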
		if (err == -EDEADLK) {
			GEM_BUG_ON(i == 0);
			do {
				int j = i - 1;

				ww_mutex_unlock(&eb->vma[j]->resv->lock);

				swap(eb->flags[i], eb->flags[j]);
				swap(eb->vma[i],  eb->vma[j]);
				eb->vma[i]->exec_flags = &eb->flags[i];
			} while (--i);
			GEM_BUG_ON(vma != eb->vma[0]);
			vma->exec_flags = &eb->flags[0];

			err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
							       &acquire);
		}
		if (err)
			break;
	}
	ww_acquire_done(&acquire);

	while (i--) {
		unsigned int flags = eb->flags[i];
		struct i915_vma *vma = eb->vma[i];
		struct drm_i915_gem_object *obj = vma->obj;

		assert_vma_held(vma);

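		/*
		 * EXEC_OBJECT_CAPTURE adds the vma to the request's capture
		 * list, so that its contents are recorded in the error state
		 * if the GPU hangs while executing this batch.
		 */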
		if (flags & EXEC_OBJECT_CAPTURE) {
			struct i915_capture_list *capture;

			capture = kmalloc(sizeof(*capture), GFP_KERNEL);
			if (capture) {
				capture->next = eb->request->capture_list;
				capture->vma = vma;
				eb->request->capture_list = capture;
			}
		}

		/*
		 * If the GPU is not _reading_ through the CPU cache, we need
		 * to make sure that any writes (both previous GPU writes from
		 * before a change in snooping levels and normal CPU writes)
		 * caught in that cache are flushed to main memory.
		 *
		 * We want to say
		 *   obj->cache_dirty &&
		 *   !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		 * but gcc's optimiser doesn't handle that as well and emits
		 * two jumps instead of one. Maybe one day...
		 */
		if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) {
			if (i915_gem_clflush_object(obj, 0))
				flags &= ~EXEC_OBJECT_ASYNC;
		}

		if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) {
			err = i915_request_await_object
				(eb->request, obj, flags & EXEC_OBJECT_WRITE);
		}

		if (err == 0)
			err = i915_vma_move_to_active(vma, eb->request, flags);

		i915_vma_unlock(vma);

		__eb_unreserve_vma(vma, flags);
		vma->exec_flags = NULL;

		if (unlikely(flags & __EXEC_OBJECT_HAS_REF))
			i915_vma_put(vma);
	}
	ww_acquire_fini(&acquire);

	if (unlikely(err))
		goto err_skip;

	eb->exec = NULL;

	/* Unconditionally flush any chipset caches (for streaming writes). */
	intel_gt_chipset_flush(eb->engine->gt);
	return 0;

err_skip:
	i915_request_skip(eb->request, err);
	return err;
}

static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
		return false;

	/* Kernel clipping was a DRI1 misfeature */
	if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) {
		if (exec->num_cliprects || exec->cliprects_ptr)
			return false;
	}

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return false;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return false;

	return true;
}

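/*
 * For I915_EXEC_GEN7_SOL_RESET, zero the gen7 stream output (SOL)
 * write offsets by emitting a single MI_LOAD_REGISTER_IMM covering all
 * four GEN7_SO_WRITE_OFFSET registers.
 */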
static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
{
	u32 *cs;
	int i;

	if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	cs = intel_ring_begin(rq, 4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(4);
	for (i = 0; i < 4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
		*cs++ = 0;
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}

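/*
 * Copy the batch through the command parser into a shadow buffer taken
 * from the engine pool. On success, the pinned shadow is appended to
 * the exec arrays so that it is tracked (and released) like any other
 * buffer of this submission.
 */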
static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
{
	struct intel_engine_pool_node *pool;
	struct i915_vma *vma;
	int err;

	pool = intel_engine_get_pool(eb->engine, eb->batch_len);
	if (IS_ERR(pool))
		return ERR_CAST(pool);

	err = intel_engine_cmd_parser(eb->engine,
				      eb->batch->obj,
				      pool->obj,
				      eb->batch_start_offset,
				      eb->batch_len,
				      is_master);
	if (err) {
		if (err == -EACCES) /* unhandled chained batch */
			vma = NULL;
		else
			vma = ERR_PTR(err);
		goto err;
	}

	vma = i915_gem_object_ggtt_pin(pool->obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		goto err;

	eb->vma[eb->buffer_count] = i915_vma_get(vma);
	eb->flags[eb->buffer_count] =
		__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
	vma->exec_flags = &eb->flags[eb->buffer_count];
	eb->buffer_count++;

	vma->private = pool;
	return vma;

err:
	intel_engine_pool_put(pool);
	return vma;
}

static void
add_to_client(struct i915_request *rq, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	rq->file_priv = file_priv;

	spin_lock(&file_priv->mm.lock);
	list_add_tail(&rq->client_link, &file_priv->mm.request_list);
	spin_unlock(&file_priv->mm.lock);
}

static int eb_submit(struct i915_execbuffer *eb)
{
	int err;

	err = eb_move_to_gpu(eb);
	if (err)
		return err;

	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
		err = i915_reset_gen7_sol_offsets(eb->request);
		if (err)
			return err;
	}

	/*
	 * Once we have completed waiting for other engines (using HW
	 * semaphores), we can signal that this request/batch is ready to run.
	 * This allows us to determine whether the batch is still waiting on
	 * the GPU or actually running by checking the breadcrumb.
	 */
	if (eb->engine->emit_init_breadcrumb) {
		err = eb->engine->emit_init_breadcrumb(eb->request);
		if (err)
			return err;
	}

	err = eb->engine->emit_bb_start(eb->request,
					eb->batch->node.start +
					eb->batch_start_offset,
					eb->batch_len,
					eb->batch_flags);
	if (err)
		return err;

	return 0;
}

static int num_vcs_engines(const struct drm_i915_private *i915)
{
	return hweight64(INTEL_INFO(i915)->engine_mask &
			 GENMASK_ULL(VCS0 + I915_MAX_VCS - 1, VCS0));
}

/*
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The engine index is returned.
 */
static unsigned int
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
			 struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_engine < 0)
		file_priv->bsd_engine =
			get_random_int() % num_vcs_engines(dev_priv);

	return file_priv->bsd_engine;
}

static const enum intel_engine_id user_ring_map[] = {
	[I915_EXEC_DEFAULT]	= RCS0,
	[I915_EXEC_RENDER]	= RCS0,
	[I915_EXEC_BLT]		= BCS0,
	[I915_EXEC_BSD]		= VCS0,
	[I915_EXEC_VEBOX]	= VECS0
};

static struct i915_request *eb_throttle(struct intel_context *ce)
{
	struct intel_ring *ring = ce->ring;
	struct intel_timeline *tl = ce->timeline;
	struct i915_request *rq;

	/*
	 * Completely unscientific finger-in-the-air estimates for suitable
	 * maximum user request size (to avoid blocking) and then backoff.
	 */
	if (intel_ring_update_space(ring) >= PAGE_SIZE)
		return NULL;

	/*
	 * Find a request that after waiting upon, there will be at least half
	 * the ring available. The hysteresis allows us to compete for the
	 * shared ring and should mean that we sleep less often prior to
	 * claiming our resources, but not so long that the ring completely
	 * drains before we can submit our next request.
	 */
	list_for_each_entry(rq, &tl->requests, link) {
		if (rq->ring != ring)
			continue;

		if (__intel_ring_space(rq->postfix,
				       ring->emit, ring->size) > ring->size / 2)
			break;
	}
	if (&rq->link == &tl->requests)
		return NULL; /* weird, we will check again later for real */

	return i915_request_get(rq);
}

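/*
 * Context pinning is refcounted: the atomic fast path below only works
 * for an already-pinned context, so only the first pin (and the final
 * unpin) has to take struct_mutex to set up or tear down the backing
 * state.
 */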
static int
__eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
{
	int err;

	if (likely(atomic_inc_not_zero(&ce->pin_count)))
		return 0;

	err = mutex_lock_interruptible(&eb->i915->drm.struct_mutex);
	if (err)
		return err;

	err = __intel_context_do_pin(ce);
	mutex_unlock(&eb->i915->drm.struct_mutex);

	return err;
}

static void
__eb_unpin_context(struct i915_execbuffer *eb, struct intel_context *ce)
{
	if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
		return;

	mutex_lock(&eb->i915->drm.struct_mutex);
	intel_context_unpin(ce);
	mutex_unlock(&eb->i915->drm.struct_mutex);
}

static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
{
	struct intel_timeline *tl;
	struct i915_request *rq;
	int err;

	/*
	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged.
	 */
	err = intel_gt_terminally_wedged(ce->engine->gt);
	if (err)
		return err;

	/*
	 * Pinning the contexts may generate requests in order to acquire
	 * GGTT space, so do this first before we reserve a seqno for
	 * ourselves.
	 */
	err = __eb_pin_context(eb, ce);
	if (err)
		return err;

	/*
	 * Take a local wakeref for preparing to dispatch the execbuf as
	 * we expect to access the hardware fairly frequently in the
	 * process, and require the engine to be kept awake between accesses.
	 * Upon dispatch, we acquire another prolonged wakeref that we hold
	 * until the timeline is idle, which in turn releases the wakeref
	 * taken on the engine, and the parent device.
	 */
	tl = intel_context_timeline_lock(ce);
	if (IS_ERR(tl)) {
		err = PTR_ERR(tl);
		goto err_unpin;
	}

	intel_context_enter(ce);
	rq = eb_throttle(ce);

	intel_context_timeline_unlock(tl);

	if (rq) {
		if (i915_request_wait(rq,
				      I915_WAIT_INTERRUPTIBLE,
				      MAX_SCHEDULE_TIMEOUT) < 0) {
			i915_request_put(rq);
			err = -EINTR;
			goto err_exit;
		}

		i915_request_put(rq);
	}

	eb->engine = ce->engine;
	eb->context = ce;
	return 0;

err_exit:
	mutex_lock(&tl->mutex);
	intel_context_exit(ce);
	intel_context_timeline_unlock(tl);
err_unpin:
	__eb_unpin_context(eb, ce);
	return err;
}

static void eb_unpin_engine(struct i915_execbuffer *eb)
{
	struct intel_context *ce = eb->context;
	struct intel_timeline *tl = ce->timeline;

	mutex_lock(&tl->mutex);
	intel_context_exit(ce);
	mutex_unlock(&tl->mutex);

	__eb_unpin_context(eb, ce);
}

static unsigned int
eb_select_legacy_ring(struct i915_execbuffer *eb,
		      struct drm_file *file,
		      struct drm_i915_gem_execbuffer2 *args)
{
	struct drm_i915_private *i915 = eb->i915;
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;

	if (user_ring_id != I915_EXEC_BSD &&
	    (args->flags & I915_EXEC_BSD_MASK)) {
		DRM_DEBUG("execbuf with non bsd ring but with invalid "
			  "bsd dispatch flags: %d\n", (int)(args->flags));
		return -1;
	}

	if (user_ring_id == I915_EXEC_BSD && num_vcs_engines(i915) > 1) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
			bsd_idx = gen8_dispatch_bsd_engine(i915, file);
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
			bsd_idx--;
		} else {
			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
				  bsd_idx);
			return -1;
		}

		return _VCS(bsd_idx);
	}

	if (user_ring_id >= ARRAY_SIZE(user_ring_map)) {
		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
		return -1;
	}

	return user_ring_map[user_ring_id];
}

static int
eb_pin_engine(struct i915_execbuffer *eb,
	      struct drm_file *file,
	      struct drm_i915_gem_execbuffer2 *args)
{
	struct intel_context *ce;
	unsigned int idx;
	int err;

	if (i915_gem_context_user_engines(eb->gem_context))
		idx = args->flags & I915_EXEC_RING_MASK;
	else
		idx = eb_select_legacy_ring(eb, file, args);

	ce = i915_gem_context_get_engine(eb->gem_context, idx);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = __eb_pin_engine(eb, ce);
	intel_context_put(ce);

	return err;
}

static void
__free_fence_array(struct drm_syncobj **fences, unsigned int n)
{
	while (n--)
		drm_syncobj_put(ptr_mask_bits(fences[n], 2));
	kvfree(fences);
}

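/*
 * For I915_EXEC_FENCE_ARRAY, cliprects_ptr/num_cliprects are reused to
 * carry an array of drm_i915_gem_exec_fence. Each syncobj handle is
 * resolved and the pointer packed together with its flags in the low
 * bits (see ptr_pack_bits() below).
 */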
static struct drm_syncobj **
get_fence_array(struct drm_i915_gem_execbuffer2 *args,
		struct drm_file *file)
{
	const unsigned long nfences = args->num_cliprects;
	struct drm_i915_gem_exec_fence __user *user;
	struct drm_syncobj **fences;
	unsigned long n;
	int err;

	if (!(args->flags & I915_EXEC_FENCE_ARRAY))
		return NULL;

	/* Check multiplication overflow for access_ok() and kvmalloc_array() */
	BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
	if (nfences > min_t(unsigned long,
			    ULONG_MAX / sizeof(*user),
			    SIZE_MAX / sizeof(*fences)))
		return ERR_PTR(-EINVAL);

	user = u64_to_user_ptr(args->cliprects_ptr);
	if (!access_ok(user, nfences * sizeof(*user)))
		return ERR_PTR(-EFAULT);

	fences = kvmalloc_array(nfences, sizeof(*fences),
				__GFP_NOWARN | GFP_KERNEL);
	if (!fences)
		return ERR_PTR(-ENOMEM);

	for (n = 0; n < nfences; n++) {
		struct drm_i915_gem_exec_fence fence;
		struct drm_syncobj *syncobj;

		if (__copy_from_user(&fence, user++, sizeof(fence))) {
			err = -EFAULT;
			goto err;
		}

		if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
			err = -EINVAL;
			goto err;
		}

		syncobj = drm_syncobj_find(file, fence.handle);
		if (!syncobj) {
			DRM_DEBUG("Invalid syncobj handle provided\n");
			err = -ENOENT;
			goto err;
		}

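		/*
		 * Assert that every known fence flag fits into the low
		 * pointer bits guaranteed free by the kmalloc alignment,
		 * so that packing the flags alongside the syncobj pointer
		 * is lossless.
		 */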
		BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
			     ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);

		fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
	}

	return fences;

err:
	__free_fence_array(fences, n);
	return ERR_PTR(err);
}

static void
put_fence_array(struct drm_i915_gem_execbuffer2 *args,
		struct drm_syncobj **fences)
{
	if (fences)
		__free_fence_array(fences, args->num_cliprects);
}

static int
await_fence_array(struct i915_execbuffer *eb,
		  struct drm_syncobj **fences)
{
	const unsigned int nfences = eb->args->num_cliprects;
	unsigned int n;
	int err;

	for (n = 0; n < nfences; n++) {
		struct drm_syncobj *syncobj;
		struct dma_fence *fence;
		unsigned int flags;

		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
		if (!(flags & I915_EXEC_FENCE_WAIT))
			continue;

		fence = drm_syncobj_fence_get(syncobj);
		if (!fence)
			return -EINVAL;

		err = i915_request_await_dma_fence(eb->request, fence);
		dma_fence_put(fence);
		if (err < 0)
			return err;
	}

	return 0;
}

static void
signal_fence_array(struct i915_execbuffer *eb,
		   struct drm_syncobj **fences)
{
	const unsigned int nfences = eb->args->num_cliprects;
	struct dma_fence * const fence = &eb->request->fence;
	unsigned int n;

	for (n = 0; n < nfences; n++) {
		struct drm_syncobj *syncobj;
		unsigned int flags;

		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
		if (!(flags & I915_EXEC_FENCE_SIGNAL))
			continue;

		drm_syncobj_replace_fence(syncobj, fence);
	}
}

static int
i915_gem_do_execbuffer(struct drm_device *dev,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec,
		       struct drm_syncobj **fences)
{
	struct i915_execbuffer eb;
	struct dma_fence *in_fence = NULL;
	struct dma_fence *exec_fence = NULL;
	struct sync_file *out_fence = NULL;
	int out_fence_fd = -1;
	int err;

	BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS);
	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);

	eb.i915 = to_i915(dev);
	eb.file = file;
	eb.args = args;
	if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
		args->flags |= __EXEC_HAS_RELOC;

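	/*
	 * The exec2_list allocation provides eb_element_size() bytes per
	 * buffer (plus one spare slot for the command parser), which we
	 * carve into three consecutive arrays: the user exec objects, the
	 * corresponding vma pointers, and the per-object flags.
	 */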
	eb.exec = exec;
	eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1);
	eb.vma[0] = NULL;
	eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1);

	eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	reloc_cache_init(&eb.reloc_cache, eb.i915);

	eb.buffer_count = args->buffer_count;
	eb.batch_start_offset = args->batch_start_offset;
	eb.batch_len = args->batch_len;

	eb.batch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		eb.batch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		eb.batch_flags |= I915_DISPATCH_PINNED;

	if (args->flags & I915_EXEC_FENCE_IN) {
		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!in_fence)
			return -EINVAL;
	}

	if (args->flags & I915_EXEC_FENCE_SUBMIT) {
		if (in_fence) {
			err = -EINVAL;
			goto err_in_fence;
		}

		exec_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!exec_fence) {
			err = -EINVAL;
			goto err_in_fence;
		}
	}

	if (args->flags & I915_EXEC_FENCE_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			err = out_fence_fd;
			goto err_exec_fence;
		}
	}

	err = eb_create(&eb);
	if (err)
		goto err_out_fence;

	GEM_BUG_ON(!eb.lut_size);

	err = eb_select_context(&eb);
	if (unlikely(err))
		goto err_destroy;

	err = eb_pin_engine(&eb, file, args);
	if (unlikely(err))
		goto err_context;

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto err_engine;

	err = eb_relocate(&eb);
	if (err) {
		/*
		 * If the user expects the execobject.offset and
		 * reloc.presumed_offset to be an exact match,
		 * as for using NO_RELOC, then we cannot update
		 * the execobject.offset until we have completed
		 * relocation.
		 */
		args->flags &= ~__EXEC_HAS_RELOC;
		goto err_vma;
	}

	if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		err = -EINVAL;
		goto err_vma;
	}
	if (eb.batch_start_offset > eb.batch->size ||
	    eb.batch_len > eb.batch->size - eb.batch_start_offset) {
		DRM_DEBUG("Attempting to use out-of-bounds batch\n");
		err = -EINVAL;
		goto err_vma;
	}

	if (eb_use_cmdparser(&eb)) {
		struct i915_vma *vma;

		vma = eb_parse(&eb, drm_is_current_master(file));
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_vma;
		}

		if (vma) {
			/*
			 * Batch parsed and accepted:
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
			 * bit from MI_BATCH_BUFFER_START commands issued in
			 * the dispatch_execbuffer implementations. We
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
			eb.batch_flags |= I915_DISPATCH_SECURE;
			eb.batch_start_offset = 0;
			eb.batch = vma;
		}
	}

	if (eb.batch_len == 0)
		eb.batch_len = eb.batch->size - eb.batch_start_offset;

	/*
	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again.
	 */
	if (eb.batch_flags & I915_DISPATCH_SECURE) {
		struct i915_vma *vma;

		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_vma;
		}

		eb.batch = vma;
	}

	/* All GPU relocation batches must be submitted prior to the user rq */
	GEM_BUG_ON(eb.reloc_cache.rq);

	/* Allocate a request for this batch buffer nice and early. */
	eb.request = i915_request_create(eb.context);
	if (IS_ERR(eb.request)) {
		err = PTR_ERR(eb.request);
		goto err_batch_unpin;
	}

	if (in_fence) {
		err = i915_request_await_dma_fence(eb.request, in_fence);
		if (err < 0)
			goto err_request;
	}

	if (exec_fence) {
		err = i915_request_await_execution(eb.request, exec_fence,
						   eb.engine->bond_execute);
		if (err < 0)
			goto err_request;
	}

	if (fences) {
		err = await_fence_array(&eb, fences);
		if (err)
			goto err_request;
	}

	if (out_fence_fd != -1) {
		out_fence = sync_file_create(&eb.request->fence);
		if (!out_fence) {
			err = -ENOMEM;
			goto err_request;
		}
	}

	/*
	 * Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	eb.request->batch = eb.batch;
	if (eb.batch->private)
		intel_engine_pool_mark_active(eb.batch->private, eb.request);

	trace_i915_request_queue(eb.request, eb.batch_flags);
	err = eb_submit(&eb);
err_request:
	add_to_client(eb.request, file);
	i915_request_add(eb.request);

	if (fences)
		signal_fence_array(&eb, fences);

	if (out_fence) {
		if (err == 0) {
			fd_install(out_fence_fd, out_fence->file);
			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
			args->rsvd2 |= (u64)out_fence_fd << 32;
			out_fence_fd = -1;
		} else {
			fput(out_fence->file);
		}
	}

err_batch_unpin:
	if (eb.batch_flags & I915_DISPATCH_SECURE)
		i915_vma_unpin(eb.batch);
	if (eb.batch->private)
		intel_engine_pool_put(eb.batch->private);
err_vma:
	if (eb.exec)
		eb_release_vmas(&eb);
	mutex_unlock(&dev->struct_mutex);
err_engine:
	eb_unpin_engine(&eb);
err_context:
	i915_gem_context_put(eb.gem_context);
err_destroy:
	eb_destroy(&eb);
err_out_fence:
	if (out_fence_fd != -1)
		put_unused_fd(out_fence_fd);
err_exec_fence:
	dma_fence_put(exec_fence);
err_in_fence:
	dma_fence_put(in_fence);
	return err;
}

static size_t eb_element_size(void)
{
	return (sizeof(struct drm_i915_gem_exec_object2) +
		sizeof(struct i915_vma *) +
		sizeof(unsigned int));
}

static bool check_buffer_count(size_t count)
{
	const size_t sz = eb_element_size();

	/*
	 * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup
	 * array size (see eb_create()). Otherwise, we can accept an array as
	 * large as can be addressed (though use large arrays at your peril)!
	 */

	return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1);
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	const size_t count = args->buffer_count;
	unsigned int i;
	int err;

	if (!check_buffer_count(count)) {
		DRM_DEBUG("execbuf2 with %zd buffers\n", count);
		return -EINVAL;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	if (!i915_gem_check_execbuffer(&exec2))
		return -EINVAL;

	/* Copy in the exec list from userland */
	exec_list = kvmalloc_array(count, sizeof(*exec_list),
				   __GFP_NOWARN | GFP_KERNEL);
	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -ENOMEM;
	}
	err = copy_from_user(exec_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * count);
	if (err) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, err);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_GEN(to_i915(dev)) < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list, NULL);
	if (exec2.flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
			exec2_list[i].offset &= PIN_OFFSET_MASK;
			if (__copy_to_user(&user_exec_list[i].offset,
					   &exec2_list[i].offset,
					   sizeof(user_exec_list[i].offset)))
				break;
		}
	}

	kvfree(exec_list);
	kvfree(exec2_list);
	return err;
}

int
i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list;
	struct drm_syncobj **fences = NULL;
	const size_t count = args->buffer_count;
	int err;

	if (!check_buffer_count(count)) {
		DRM_DEBUG("execbuf2 with %zd buffers\n", count);
		return -EINVAL;
	}

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	/* Allocate an extra slot for use by the command parser */
	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %zd buffers\n",
			  count);
		return -ENOMEM;
	}
	if (copy_from_user(exec2_list,
			   u64_to_user_ptr(args->buffers_ptr),
			   sizeof(*exec2_list) * count)) {
		DRM_DEBUG("copy %zd exec entries failed\n", count);
		kvfree(exec2_list);
		return -EFAULT;
	}
	if (args->flags & I915_EXEC_FENCE_ARRAY) {
		fences = get_fence_array(args, file);
		if (IS_ERR(fences)) {
			kvfree(exec2_list);
			return PTR_ERR(fences);
		}
	}

	err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences);

	/*
	 * Now that we have begun execution of the batchbuffer, we ignore
	 * any new error after this point. Also given that we have already
	 * updated the associated relocations, we try to write out the current
	 * object locations irrespective of any error.
	 */
	if (args->flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);
		unsigned int i;

		/* Copy the new buffer offsets back to the user's exec list. */
		/*
		 * Note: count * sizeof(*user_exec_list) does not overflow,
		 * because we checked 'count' in check_buffer_count().
		 *
		 * And this range already got effectively checked earlier
		 * when we did the "copy_from_user()" above.
		 */
		if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list)))
			goto end;

		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
			unsafe_put_user(exec2_list[i].offset,
					&user_exec_list[i].offset,
					end_user);
		}
end_user:
		user_access_end();
end:;
	}

	args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
	put_fence_array(args, fences);
	kvfree(exec2_list);
	return err;
}