/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008,2010 Intel Corporation
 */

#include <linux/intel-iommu.h>
#include <linux/dma-resv.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_syncobj.h>

#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pool.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_ioctls.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"

struct eb_vma {
	struct i915_vma *vma;
	unsigned int flags;

	/** This vma's place in the execbuf reservation list */
	struct drm_i915_gem_exec_object2 *exec;
	struct list_head bind_link;
	struct list_head reloc_link;

	struct hlist_node node;
	u32 handle;
};

enum {
	FORCE_CPU_RELOC = 1,
	FORCE_GTT_RELOC,
	FORCE_GPU_RELOC,
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
};

#define __EXEC_OBJECT_HAS_PIN		BIT(31)
#define __EXEC_OBJECT_HAS_FENCE		BIT(30)
#define __EXEC_OBJECT_NEEDS_MAP		BIT(29)
#define __EXEC_OBJECT_NEEDS_BIAS	BIT(28)
#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 28) /* all of the above */
#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)

#define __EXEC_HAS_RELOC	BIT(31)
#define __EXEC_INTERNAL_FLAGS	(~0u << 31)
#define UPDATE			PIN_OFFSET_FIXED

#define BATCH_OFFSET_BIAS (256*1024)

#define __I915_EXEC_ILLEGAL_FLAGS \
	(__I915_EXEC_UNKNOWN_FLAGS | \
	 I915_EXEC_CONSTANTS_MASK  | \
	 I915_EXEC_RESOURCE_STREAMER)

/* Catch emission of unexpected errors for CI! */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#undef EINVAL
#define EINVAL ({ \
	DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
	22; \
})
#endif

/**
 * DOC: User command execution
 *
 * Userspace submits commands to be executed on the GPU as an instruction
 * stream within a GEM object we call a batchbuffer. These instructions may
 * refer to other GEM objects containing auxiliary state such as kernels,
 * samplers, render targets and even secondary batchbuffers. Userspace does
 * not know where in the GPU memory these objects reside and so before the
 * batchbuffer is passed to the GPU for execution, those addresses in the
 * batchbuffer and auxiliary objects are updated. This is known as relocation,
 * or patching. To try and avoid having to relocate each object on the next
 * execution, userspace is told the location of those objects in this pass,
 * but this remains just a hint as the kernel may choose a new location for
 * any object in the future.
 *
 * At the level of talking to the hardware, submitting a batchbuffer for the
 * GPU to execute is to add content to a buffer from which the HW
 * command streamer is reading.
 *
 * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e.
 *    Execlists, this command is not placed on the same buffer as the
 *    remaining items.
 *
 * 2. Add a command to invalidate caches to the buffer.
 *
 * 3. Add a batchbuffer start command to the buffer; the start command is
 *    essentially a token together with the GPU address of the batchbuffer
 *    to be executed.
 *
 * 4. Add a pipeline flush to the buffer.
 *
 * 5. Add a memory write command to the buffer to record when the GPU
 *    is done executing the batchbuffer. The memory write writes the
 *    global sequence number of the request, ``i915_request::global_seqno``;
 *    the i915 driver uses the current value in the register to determine
 *    if the GPU has completed the batchbuffer.
 *
 * 6. Add a user interrupt command to the buffer. This command instructs
 *    the GPU to issue an interrupt when the command, pipeline flush and
 *    memory write are completed.
 *
 * 7. Inform the hardware of the additional commands added to the buffer
 *    (by updating the tail pointer).
 *
 * Processing an execbuf ioctl is conceptually split up into a few phases.
 *
 * 1. Validation - Ensure all the pointers, handles and flags are valid.
 * 2. Reservation - Assign GPU address space for every object
 * 3. Relocation - Update any addresses to point to the final locations
 * 4. Serialisation - Order the request with respect to its dependencies
 * 5. Construction - Construct a request to execute the batchbuffer
 * 6. Submission (at some point in the future execution)
 *
 * Reserving resources for the execbuf is the most complicated phase. We
 * neither want to have to migrate the object in the address space, nor do
 * we want to have to update any relocations pointing to this object. Ideally,
 * we want to leave the object where it is and for all the existing relocations
 * to match. If the object is given a new address, or if userspace thinks the
 * object is elsewhere, we have to parse all the relocation entries and update
 * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
 * all the target addresses in all of its objects match the value in the
 * relocation entries and that they all match the presumed offsets given by the
 * list of execbuffer objects. Using this knowledge, we know that if we haven't
 * moved any buffers, all the relocation entries are valid and we can skip
 * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
 * hang.) The requirements for using I915_EXEC_NO_RELOC are:
 *
 *      The addresses written in the objects must match the corresponding
 *      reloc.presumed_offset which in turn must match the corresponding
 *      execobject.offset.
 *
 *      Any render targets written to in the batch must be flagged with
 *      EXEC_OBJECT_WRITE.
 *
 *      To avoid stalling, execobject.offset should match the current
 *      address of that object within the active context.
 *
 * The reservation is done in multiple phases. First we try to keep any
 * object already bound in its current location - as long as it meets the
 * constraints imposed by the new execbuffer. Any object left unbound after the
 * first pass is then fitted into any available idle space. If an object does
 * not fit, all objects are removed from the reservation and the process rerun
 * after sorting the objects into a priority order (more difficult to fit
 * objects are tried first). Failing that, the entire VM is cleared and we try
 * to fit the execbuf one last time before concluding that it simply will not
 * fit.
 *
 * A small complication to all of this is that we allow userspace not only to
 * specify an alignment and a size for the object in the address space, but
 * we also allow userspace to specify the exact offset. These objects are
 * simpler to place (the location is known a priori); all we have to do is make
 * sure the space is available.
 *
 * Once all the objects are in place, patching up the buried pointers to point
 * to the final locations is a fairly simple job of walking over the relocation
 * entry arrays, looking up the right address and rewriting the value into
 * the object. Simple! ... The relocation entries are stored in user memory
 * and so to access them we have to copy them into a local buffer. That copy
 * has to avoid taking any pagefaults as they may lead back to a GEM object
 * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
 * the relocation into multiple passes. First we try to do everything within an
 * atomic context (avoid the pagefaults) which requires that we never wait. If
 * we detect that we may wait, or if we need to fault, then we have to fallback
 * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
 * bells yet?) Dropping the mutex means that we lose all the state we have
 * built up so far for the execbuf and we must reset any global data. However,
 * we do leave the objects pinned in their final locations - which is a
 * potential issue for concurrent execbufs. Once we have left the mutex, we can
 * allocate and copy all the relocation entries into a large array at our
 * leisure, reacquire the mutex, reclaim all the objects and other state and
 * then proceed to update any incorrect addresses with the objects.
 *
 * As we process the relocation entries, we maintain a record of whether the
 * object is being written to. Using NORELOC, we expect userspace to provide
 * this information instead. We also check whether we can skip the relocation
 * by comparing the expected value inside the relocation entry with the target's
 * final address. If they differ, we have to map the current object and rewrite
 * the 4 or 8 byte pointer within.
 *
 * Serialising an execbuf is quite simple according to the rules of the GEM
 * ABI. Execution within each context is ordered by the order of submission.
 * Writes to any GEM object are in order of submission and are exclusive. Reads
 * from a GEM object are unordered with respect to other reads, but ordered by
 * writes. A write submitted after a read cannot occur before the read, and
 * similarly any read submitted after a write cannot occur before the write.
 * Writes are ordered between engines such that only one write occurs at any
 * time (completing any reads beforehand) - using semaphores where available
 * and CPU serialisation otherwise. Other GEM accesses obey the same rules: any
 * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
 * reads before starting, and any read (either using set-domain or pread) must
 * flush all GPU writes before starting. (Note we only employ a barrier before,
 * we currently rely on userspace not concurrently starting a new execution
 * whilst reading or writing to an object. This may be an advantage or not
 * depending on how much you trust userspace not to shoot themselves in the
 * foot.) Serialisation may just result in the request being inserted into
 * a DAG awaiting its turn, but most simple is to wait on the CPU until
 * all dependencies are resolved.
 *
 * After all of that, it is just a matter of closing the request and handing it to
 * the hardware (well, leaving it in a queue to be executed). However, we also
 * offer the ability for batchbuffers to be run with elevated privileges so
 * that they access otherwise hidden registers. (Used to adjust L3 cache etc.)
 * Before any batch is given extra privileges we first must check that it
 * contains no nefarious instructions, we check that each instruction is from
 * our whitelist and all registers are also from an allowed list. We first
 * copy the user's batchbuffer to a shadow (so that the user doesn't have
 * access to it, either by the CPU or GPU as we scan it) and then parse each
 * instruction. If everything is ok, we set a flag telling the hardware to run
 * the batchbuffer in trusted mode, otherwise the ioctl is rejected.
 */

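/*
 * For orientation only, a minimal userspace sketch (not part of this file) of
 * how the ABI described above is typically driven. The structure and flag
 * names come from the i915 uapi headers; fd, ctx_id, bo_handle, batch_handle,
 * presumed_offset, nreloc, relocs and batch_len are hypothetical values used
 * purely for illustration.
 *
 *	struct drm_i915_gem_exec_object2 obj[2] = {};
 *	struct drm_i915_gem_execbuffer2 execbuf = {};
 *
 *	obj[0].handle = bo_handle;		// auxiliary state object
 *	obj[0].offset = presumed_offset;	// hint from a previous execbuf
 *	obj[0].flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
 *	obj[1].handle = batch_handle;		// batch is last unless BATCH_FIRST
 *	obj[1].relocation_count = nreloc;
 *	obj[1].relocs_ptr = (uintptr_t)relocs;
 *
 *	execbuf.buffers_ptr = (uintptr_t)obj;
 *	execbuf.buffer_count = 2;
 *	execbuf.batch_len = batch_len;
 *	execbuf.flags = I915_EXEC_RENDER | I915_EXEC_HANDLE_LUT |
 *			I915_EXEC_NO_RELOC;
 *	execbuf.rsvd1 = ctx_id;			// target hardware context
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 */
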
struct i915_execbuffer {
	struct drm_i915_private *i915; /** i915 backpointer */
	struct drm_file *file; /** per-file lookup tables and limits */
	struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
	struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
	struct eb_vma *vma;

	struct intel_engine_cs *engine; /** engine to queue the request to */
	struct intel_context *context; /* logical state for the request */
	struct i915_gem_context *gem_context; /** caller's context */

	struct i915_request *request; /** our request to build */
	struct eb_vma *batch; /** identity of the batch obj/vma */
	struct i915_vma *trampoline; /** trampoline used for chaining */

	/** actual size of execobj[] as we may extend it for the cmdparser */
	unsigned int buffer_count;

	/** list of vma not yet bound during reservation phase */
	struct list_head unbound;

	/** list of vma that have execobj.relocation_count */
	struct list_head relocs;

	/**
	 * Track the most recently used object for relocations, as we
	 * frequently have to perform multiple relocations within the same
	 * obj/page
	 */
	struct reloc_cache {
		struct drm_mm_node node; /** temporary GTT binding */
		unsigned long vaddr; /** Current kmap address */
		unsigned long page; /** Currently mapped page index */
		unsigned int gen; /** Cached value of INTEL_GEN */
		bool use_64bit_reloc : 1;
		bool has_llc : 1;
		bool has_fence : 1;
		bool needs_unfenced : 1;

		struct i915_request *rq;
		u32 *rq_cmd;
		unsigned int rq_size;
	} reloc_cache;

	u64 invalid_flags; /** Set of execobj.flags that are invalid */
	u32 context_flags; /** Set of execobj.flags to insert from the ctx */

	u32 batch_start_offset; /** Location within object of batch */
	u32 batch_len; /** Length of batch within object */
	u32 batch_flags; /** Flags composed for emit_bb_start() */

	/**
	 * Indicate either the size of the hashtable used to resolve
	 * relocation handles, or if negative that we are using a direct
	 * index into the execobj[].
	 */
	int lut_size;
	struct hlist_head *buckets; /** ht for relocation handles */
};

static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
	return intel_engine_requires_cmd_parser(eb->engine) ||
		(intel_engine_using_cmd_parser(eb->engine) &&
		 eb->args->batch_len);
}

static int eb_create(struct i915_execbuffer *eb)
{
	if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
		unsigned int size = 1 + ilog2(eb->buffer_count);

		/*
		 * Without a 1:1 association between relocation handles and
		 * the execobject[] index, we instead create a hashtable.
		 * We size it dynamically based on available memory, starting
		 * first with 1:1 associative hash and scaling back until
		 * the allocation succeeds.
		 *
		 * Later on we use a positive lut_size to indicate we are
		 * using this hashtable, and a negative value to indicate a
		 * direct lookup.
		 */
		do {
			gfp_t flags;

			/* While we can still reduce the allocation size, don't
			 * raise a warning and allow the allocation to fail.
			 * On the last pass though, we want to try as hard
			 * as possible to perform the allocation and warn
			 * if it fails.
			 */
			flags = GFP_KERNEL;
			if (size > 1)
				flags |= __GFP_NORETRY | __GFP_NOWARN;

			eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
					      flags);
			if (eb->buckets)
				break;
		} while (--size);

		if (unlikely(!size))
			return -ENOMEM;

		eb->lut_size = size;
	} else {
		eb->lut_size = -eb->buffer_count;
	}

	return 0;
}


static bool
eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
		 const struct i915_vma *vma,
		 unsigned int flags)
{
	if (vma->node.size < entry->pad_to_size)
		return true;

	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
		return true;

	if (flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	if (flags & __EXEC_OBJECT_NEEDS_MAP &&
	    !i915_vma_is_map_and_fenceable(vma))
		return true;

	return false;
}

static inline bool
eb_pin_vma(struct i915_execbuffer *eb,
	   const struct drm_i915_gem_exec_object2 *entry,
	   struct eb_vma *ev)
{
	struct i915_vma *vma = ev->vma;
	u64 pin_flags;

	if (vma->node.size)
		pin_flags = vma->node.start;
	else
		pin_flags = entry->offset & PIN_OFFSET_MASK;

	pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT))
		pin_flags |= PIN_GLOBAL;

	if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
		return false;

	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
		if (unlikely(i915_vma_pin_fence(vma))) {
			i915_vma_unpin(vma);
			return false;
		}

		if (vma->fence)
			ev->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	ev->flags |= __EXEC_OBJECT_HAS_PIN;
	return !eb_vma_misplaced(entry, vma, ev->flags);
}

static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
{
	GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));

	if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
		__i915_vma_unpin_fence(vma);

	__i915_vma_unpin(vma);
}

static inline void
eb_unreserve_vma(struct eb_vma *ev)
{
	if (!(ev->flags & __EXEC_OBJECT_HAS_PIN))
		return;

	__eb_unreserve_vma(ev->vma, ev->flags);
	ev->flags &= ~__EXEC_OBJECT_RESERVED;
}

static int
eb_validate_vma(struct i915_execbuffer *eb,
		struct drm_i915_gem_exec_object2 *entry,
		struct i915_vma *vma)
{
	if (unlikely(entry->flags & eb->invalid_flags))
		return -EINVAL;

	if (unlikely(entry->alignment &&
		     !is_power_of_2_u64(entry->alignment)))
		return -EINVAL;

	/*
	 * Offset can be used as input (EXEC_OBJECT_PINNED), reject
	 * any non-page-aligned or non-canonical addresses.
	 */
	if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
		     entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
		return -EINVAL;

	/* pad_to_size was once a reserved field, so sanitize it */
	if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
		if (unlikely(offset_in_page(entry->pad_to_size)))
			return -EINVAL;
	} else {
		entry->pad_to_size = 0;
	}
	/*
	 * From drm_mm perspective address space is continuous,
	 * so from this point we're always using non-canonical
	 * form internally.
	 */
	entry->offset = gen8_noncanonical_addr(entry->offset);

	if (!eb->reloc_cache.has_fence) {
		entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
	} else {
		if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
		     eb->reloc_cache.needs_unfenced) &&
		    i915_gem_object_is_tiled(vma->obj))
			entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
	}

	if (!(entry->flags & EXEC_OBJECT_PINNED))
		entry->flags |= eb->context_flags;

	return 0;
}

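/*
 * A brief note on the canonical/non-canonical conversion used in
 * eb_validate_vma() above: the hardware expects 48-bit addresses to be
 * sign-extended from bit 47, while drm_mm tracks the zero-extended form.
 * Roughly (a sketch, not necessarily the driver's exact helpers):
 *
 *	canonical     = sign_extend64(address, 47);
 *	non_canonical = address & GENMASK_ULL(47, 0);
 *
 * e.g. 0x0000_8000_0000_1000 becomes 0xffff_8000_0000_1000 in canonical form.
 */
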
static void
eb_add_vma(struct i915_execbuffer *eb,
	   unsigned int i, unsigned batch_idx,
	   struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
	struct eb_vma *ev = &eb->vma[i];

	GEM_BUG_ON(i915_vma_is_closed(vma));

	ev->vma = i915_vma_get(vma);
	ev->exec = entry;
	ev->flags = entry->flags;

	if (eb->lut_size > 0) {
		ev->handle = entry->handle;
		hlist_add_head(&ev->node,
			       &eb->buckets[hash_32(entry->handle,
						    eb->lut_size)]);
	}

	if (entry->relocation_count)
		list_add_tail(&ev->reloc_link, &eb->relocs);

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if (i == batch_idx) {
		if (entry->relocation_count &&
		    !(ev->flags & EXEC_OBJECT_PINNED))
			ev->flags |= __EXEC_OBJECT_NEEDS_BIAS;
		if (eb->reloc_cache.has_fence)
			ev->flags |= EXEC_OBJECT_NEEDS_FENCE;

		eb->batch = ev;
	}

	if (eb_pin_vma(eb, entry, ev)) {
		if (entry->offset != vma->node.start) {
			entry->offset = vma->node.start | UPDATE;
			eb->args->flags |= __EXEC_HAS_RELOC;
		}
	} else {
		eb_unreserve_vma(ev);
		list_add_tail(&ev->bind_link, &eb->unbound);
	}
}

static inline int use_cpu_reloc(const struct reloc_cache *cache,
				const struct drm_i915_gem_object *obj)
{
	if (!i915_gem_object_has_struct_page(obj))
		return false;

	if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
		return true;

	if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
		return false;

	return (cache->has_llc ||
		obj->cache_dirty ||
		obj->cache_level != I915_CACHE_NONE);
}

static int eb_reserve_vma(const struct i915_execbuffer *eb,
			  struct eb_vma *ev,
			  u64 pin_flags)
{
	struct drm_i915_gem_exec_object2 *entry = ev->exec;
	unsigned int exec_flags = ev->flags;
	struct i915_vma *vma = ev->vma;
	int err;

	if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
		pin_flags |= PIN_GLOBAL;

	/*
	 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
	 * limit address to the first 4GBs for unflagged objects.
	 */
	if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
		pin_flags |= PIN_ZONE_4G;

	if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
		pin_flags |= PIN_MAPPABLE;

	if (exec_flags & EXEC_OBJECT_PINNED)
		pin_flags |= entry->offset | PIN_OFFSET_FIXED;
	else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
		pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;

	if (drm_mm_node_allocated(&vma->node) &&
	    eb_vma_misplaced(entry, vma, ev->flags)) {
		err = i915_vma_unbind(vma);
		if (err)
			return err;
	}

	err = i915_vma_pin(vma,
			   entry->pad_to_size, entry->alignment,
			   pin_flags);
	if (err)
		return err;

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start | UPDATE;
		eb->args->flags |= __EXEC_HAS_RELOC;
	}

	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
		err = i915_vma_pin_fence(vma);
		if (unlikely(err)) {
			i915_vma_unpin(vma);
			return err;
		}

		if (vma->fence)
			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	ev->flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
	GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags));

	return 0;
}

static int eb_reserve(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int pin_flags = PIN_USER | PIN_NONBLOCK;
	struct list_head last;
	struct eb_vma *ev;
	unsigned int i, pass;
	int err;

	/*
	 * Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */

	pass = 0;
	err = 0;
	do {
		list_for_each_entry(ev, &eb->unbound, bind_link) {
			err = eb_reserve_vma(eb, ev, pin_flags);
			if (err)
				break;
		}
		if (!(err == -ENOSPC || err == -EAGAIN))
			return err;

		/* Resort *all* the objects into priority order */
		INIT_LIST_HEAD(&eb->unbound);
		INIT_LIST_HEAD(&last);
		for (i = 0; i < count; i++) {
			unsigned int flags;

			ev = &eb->vma[i];
			flags = ev->flags;
			if (flags & EXEC_OBJECT_PINNED &&
			    flags & __EXEC_OBJECT_HAS_PIN)
				continue;

			eb_unreserve_vma(ev);

			if (flags & EXEC_OBJECT_PINNED)
				/* Pinned must have their slot */
				list_add(&ev->bind_link, &eb->unbound);
			else if (flags & __EXEC_OBJECT_NEEDS_MAP)
				/* Map require the lowest 256MiB (aperture) */
				list_add_tail(&ev->bind_link, &eb->unbound);
			else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
				/* Prioritise 4GiB region for restricted bo */
				list_add(&ev->bind_link, &last);
			else
				list_add_tail(&ev->bind_link, &last);
		}
		list_splice_tail(&last, &eb->unbound);

		if (err == -EAGAIN) {
			flush_workqueue(eb->i915->mm.userptr_wq);
			continue;
		}

		switch (pass++) {
		case 0:
			break;

		case 1:
			/* Too fragmented, unbind everything and retry */
			mutex_lock(&eb->context->vm->mutex);
			err = i915_gem_evict_vm(eb->context->vm);
			mutex_unlock(&eb->context->vm->mutex);
			if (err)
				return err;
			break;

		default:
			return -ENOSPC;
		}

		pin_flags = PIN_USER;
	} while (1);
}

static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
{
	if (eb->args->flags & I915_EXEC_BATCH_FIRST)
		return 0;
	else
		return eb->buffer_count - 1;
}

static int eb_select_context(struct i915_execbuffer *eb)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
	if (unlikely(!ctx))
		return -ENOENT;

	eb->gem_context = ctx;
	if (rcu_access_pointer(ctx->vm))
		eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;

	eb->context_flags = 0;
	if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
		eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return 0;
}

static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
	struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
	struct drm_i915_gem_object *obj;
	unsigned int i, batch;
	int err;

	if (unlikely(i915_gem_context_is_closed(eb->gem_context)))
		return -ENOENT;

	INIT_LIST_HEAD(&eb->relocs);
	INIT_LIST_HEAD(&eb->unbound);

	batch = eb_batch_index(eb);

	for (i = 0; i < eb->buffer_count; i++) {
		u32 handle = eb->exec[i].handle;
		struct i915_lut_handle *lut;
		struct i915_vma *vma;

		vma = radix_tree_lookup(handles_vma, handle);
		if (likely(vma))
			goto add_vma;

		obj = i915_gem_object_lookup(eb->file, handle);
		if (unlikely(!obj)) {
			err = -ENOENT;
			goto err_vma;
		}

		vma = i915_vma_instance(obj, eb->context->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_obj;
		}

		lut = i915_lut_handle_alloc();
		if (unlikely(!lut)) {
			err = -ENOMEM;
			goto err_obj;
		}

		err = radix_tree_insert(handles_vma, handle, vma);
		if (unlikely(err)) {
			i915_lut_handle_free(lut);
			goto err_obj;
		}

		/* transfer ref to lut */
		if (!atomic_fetch_inc(&vma->open_count))
			i915_vma_reopen(vma);
		lut->handle = handle;
		lut->ctx = eb->gem_context;

		i915_gem_object_lock(obj);
		list_add(&lut->obj_link, &obj->lut_list);
		i915_gem_object_unlock(obj);

add_vma:
		err = eb_validate_vma(eb, &eb->exec[i], vma);
		if (unlikely(err))
			goto err_vma;

		eb_add_vma(eb, i, batch, vma);
	}

	return 0;

err_obj:
	i915_gem_object_put(obj);
err_vma:
	eb->vma[i].vma = NULL;
	return err;
}

static struct eb_vma *
eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
{
	if (eb->lut_size < 0) {
		if (handle >= -eb->lut_size)
			return NULL;
		return &eb->vma[handle];
	} else {
		struct hlist_head *head;
		struct eb_vma *ev;

		head = &eb->buckets[hash_32(handle, eb->lut_size)];
		hlist_for_each_entry(ev, head, node) {
			if (ev->handle == handle)
				return ev;
		}
		return NULL;
	}
}

static void eb_release_vmas(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;

	for (i = 0; i < count; i++) {
		struct eb_vma *ev = &eb->vma[i];
		struct i915_vma *vma = ev->vma;

		if (!vma)
			break;

		eb->vma[i].vma = NULL;

		if (ev->flags & __EXEC_OBJECT_HAS_PIN)
			__eb_unreserve_vma(vma, ev->flags);

		i915_vma_put(vma);
	}
}

static void eb_destroy(const struct i915_execbuffer *eb)
{
	GEM_BUG_ON(eb->reloc_cache.rq);

	if (eb->lut_size > 0)
		kfree(eb->buckets);
}

static inline u64
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
		  const struct i915_vma *target)
{
	return gen8_canonical_addr((int)reloc->delta + target->node.start);
}

static void reloc_cache_init(struct reloc_cache *cache,
			     struct drm_i915_private *i915)
{
	cache->page = -1;
	cache->vaddr = 0;
	/* Must be a variable in the struct to allow GCC to unroll. */
	cache->gen = INTEL_GEN(i915);
	cache->has_llc = HAS_LLC(i915);
	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
	cache->has_fence = cache->gen < 4;
	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
	cache->node.flags = 0;
	cache->rq = NULL;
	cache->rq_size = 0;
}

static inline void *unmask_page(unsigned long p)
{
	return (void *)(uintptr_t)(p & PAGE_MASK);
}

static inline unsigned int unmask_flags(unsigned long p)
{
	return p & ~PAGE_MASK;
}

#define KMAP 0x4 /* after CLFLUSH_FLAGS */

static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
{
	struct drm_i915_private *i915 =
		container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
	return &i915->ggtt;
}

static void reloc_gpu_flush(struct reloc_cache *cache)
{
	GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
	cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;

	__i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
	i915_gem_object_unpin_map(cache->rq->batch->obj);

	intel_gt_chipset_flush(cache->rq->engine->gt);

	i915_request_add(cache->rq);
	cache->rq = NULL;
}

static void reloc_cache_reset(struct reloc_cache *cache)
{
	void *vaddr;

	if (cache->rq)
		reloc_gpu_flush(cache);

	if (!cache->vaddr)
		return;

	vaddr = unmask_page(cache->vaddr);
	if (cache->vaddr & KMAP) {
		if (cache->vaddr & CLFLUSH_AFTER)
			mb();

		kunmap_atomic(vaddr);
		i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
	} else {
		struct i915_ggtt *ggtt = cache_to_ggtt(cache);

		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
		io_mapping_unmap_atomic((void __iomem *)vaddr);

		if (drm_mm_node_allocated(&cache->node)) {
			ggtt->vm.clear_range(&ggtt->vm,
					     cache->node.start,
					     cache->node.size);
			mutex_lock(&ggtt->vm.mutex);
			drm_mm_remove_node(&cache->node);
			mutex_unlock(&ggtt->vm.mutex);
		} else {
			i915_vma_unpin((struct i915_vma *)cache->node.mm);
		}
	}

	cache->vaddr = 0;
	cache->page = -1;
}

static void *reloc_kmap(struct drm_i915_gem_object *obj,
			struct reloc_cache *cache,
			unsigned long page)
{
	void *vaddr;

	if (cache->vaddr) {
		kunmap_atomic(unmask_page(cache->vaddr));
	} else {
		unsigned int flushes;
		int err;

		err = i915_gem_object_prepare_write(obj, &flushes);
		if (err)
			return ERR_PTR(err);

		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);

		cache->vaddr = flushes | KMAP;
		cache->node.mm = (void *)obj;
		if (flushes)
			mb();
	}

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
	cache->page = page;

	return vaddr;
}

static void *reloc_iomap(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 unsigned long page)
{
	struct i915_ggtt *ggtt = cache_to_ggtt(cache);
	unsigned long offset;
	void *vaddr;

	if (cache->vaddr) {
		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
	} else {
		struct i915_vma *vma;
		int err;

		if (i915_gem_object_is_tiled(obj))
			return ERR_PTR(-EINVAL);

		if (use_cpu_reloc(cache, obj))
			return NULL;

		i915_gem_object_lock(obj);
		err = i915_gem_object_set_to_gtt_domain(obj, true);
		i915_gem_object_unlock(obj);
		if (err)
			return ERR_PTR(err);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE |
					       PIN_NONBLOCK /* NOWARN */ |
					       PIN_NOEVICT);
		if (IS_ERR(vma)) {
			memset(&cache->node, 0, sizeof(cache->node));
			mutex_lock(&ggtt->vm.mutex);
			err = drm_mm_insert_node_in_range
				(&ggtt->vm.mm, &cache->node,
				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
				 0, ggtt->mappable_end,
				 DRM_MM_INSERT_LOW);
			mutex_unlock(&ggtt->vm.mutex);
			if (err) /* no inactive aperture space, use cpu reloc */
				return NULL;
		} else {
			cache->node.start = vma->node.start;
			cache->node.mm = (void *)vma;
		}
	}

	offset = cache->node.start;
	if (drm_mm_node_allocated(&cache->node)) {
		ggtt->vm.insert_page(&ggtt->vm,
				     i915_gem_object_get_dma_address(obj, page),
				     offset, I915_CACHE_NONE, 0);
	} else {
		offset += page << PAGE_SHIFT;
	}

	vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
							 offset);
	cache->page = page;
	cache->vaddr = (unsigned long)vaddr;

	return vaddr;
}

static void *reloc_vaddr(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 unsigned long page)
{
	void *vaddr;

	if (cache->page == page) {
		vaddr = unmask_page(cache->vaddr);
	} else {
		vaddr = NULL;
		if ((cache->vaddr & KMAP) == 0)
			vaddr = reloc_iomap(obj, cache, page);
		if (!vaddr)
			vaddr = reloc_kmap(obj, cache, page);
	}

	return vaddr;
}

static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
{
	if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
		if (flushes & CLFLUSH_BEFORE) {
			clflushopt(addr);
			mb();
		}

		*addr = value;

		/*
		 * Writes to the same cacheline are serialised by the CPU
		 * (including clflush). On the write path, we only require
		 * that it hits memory in an orderly fashion and place
		 * mb barriers at the start and end of the relocation phase
		 * to ensure ordering of clflush wrt to the system.
		 */
		if (flushes & CLFLUSH_AFTER)
			clflushopt(addr);
	} else
		*addr = value;
}

static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	i915_vma_lock(vma);

	if (obj->cache_dirty & ~obj->cache_coherent)
		i915_gem_clflush_object(obj, 0);
	obj->write_domain = 0;

	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);

	i915_vma_unlock(vma);

	return err;
}

static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
			     struct i915_vma *vma,
			     unsigned int len)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	struct intel_engine_pool_node *pool;
	struct i915_request *rq;
	struct i915_vma *batch;
	u32 *cmd;
	int err;

	pool = intel_engine_get_pool(eb->engine, PAGE_SIZE);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	cmd = i915_gem_object_pin_map(pool->obj,
				      cache->has_llc ?
				      I915_MAP_FORCE_WB :
				      I915_MAP_FORCE_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto out_pool;
	}

	batch = i915_vma_instance(pool->obj, vma->vm, NULL);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_unmap;
	}

	err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
	if (err)
		goto err_unmap;

	rq = i915_request_create(eb->context);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = intel_engine_pool_mark_active(pool, rq);
	if (err)
		goto err_request;

	err = reloc_move_to_gpu(rq, vma);
	if (err)
		goto err_request;

	err = eb->engine->emit_bb_start(rq,
					batch->node.start, PAGE_SIZE,
					cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
	if (err)
		goto skip_request;

	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto skip_request;

	rq->batch = batch;
	i915_vma_unpin(batch);

	cache->rq = rq;
	cache->rq_cmd = cmd;
	cache->rq_size = 0;

	/* Return with batch mapping (cmd) still pinned */
	goto out_pool;

skip_request:
	i915_request_set_error_once(rq, err);
err_request:
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(batch);
err_unmap:
	i915_gem_object_unpin_map(pool->obj);
out_pool:
	intel_engine_pool_put(pool);
	return err;
}

static u32 *reloc_gpu(struct i915_execbuffer *eb,
		      struct i915_vma *vma,
		      unsigned int len)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	u32 *cmd;

	if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1))
		reloc_gpu_flush(cache);

	if (unlikely(!cache->rq)) {
		int err;

		if (!intel_engine_can_store_dword(eb->engine))
			return ERR_PTR(-ENODEV);

		err = __reloc_gpu_alloc(eb, vma, len);
		if (unlikely(err))
			return ERR_PTR(err);
	}

	cmd = cache->rq_cmd + cache->rq_size;
	cache->rq_size += len;

	return cmd;
}

static u64
relocate_entry(struct i915_vma *vma,
	       const struct drm_i915_gem_relocation_entry *reloc,
	       struct i915_execbuffer *eb,
	       const struct i915_vma *target)
{
	u64 offset = reloc->offset;
	u64 target_offset = relocation_target(reloc, target);
	bool wide = eb->reloc_cache.use_64bit_reloc;
	void *vaddr;

	if (!eb->reloc_cache.vaddr &&
	    (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
	     !dma_resv_test_signaled_rcu(vma->resv, true))) {
		const unsigned int gen = eb->reloc_cache.gen;
		unsigned int len;
		u32 *batch;
		u64 addr;

		if (wide)
			len = offset & 7 ? 8 : 5;
		else if (gen >= 4)
			len = 4;
		else
			len = 3;

		batch = reloc_gpu(eb, vma, len);
		if (IS_ERR(batch))
			goto repeat;

		addr = gen8_canonical_addr(vma->node.start + offset);
		if (wide) {
			if (offset & 7) {
				*batch++ = MI_STORE_DWORD_IMM_GEN4;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = lower_32_bits(target_offset);

				addr = gen8_canonical_addr(addr + 4);

				*batch++ = MI_STORE_DWORD_IMM_GEN4;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = upper_32_bits(target_offset);
			} else {
				*batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = lower_32_bits(target_offset);
				*batch++ = upper_32_bits(target_offset);
			}
		} else if (gen >= 6) {
			*batch++ = MI_STORE_DWORD_IMM_GEN4;
			*batch++ = 0;
			*batch++ = addr;
			*batch++ = target_offset;
		} else if (gen >= 4) {
			*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
			*batch++ = 0;
			*batch++ = addr;
			*batch++ = target_offset;
		} else {
			*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*batch++ = addr;
			*batch++ = target_offset;
		}

		goto out;
	}

repeat:
	vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	clflush_write32(vaddr + offset_in_page(offset),
			lower_32_bits(target_offset),
			eb->reloc_cache.vaddr);

	if (wide) {
		offset += sizeof(u32);
		target_offset >>= 32;
		wide = false;
		goto repeat;
	}

out:
	return target->node.start | UPDATE;
}

static u64
eb_relocate_entry(struct i915_execbuffer *eb,
		  struct eb_vma *ev,
		  const struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_i915_private *i915 = eb->i915;
	struct eb_vma *target;
	int err;

	/* we already hold a reference to all valid objects */
	target = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(!target))
		return -ENOENT;

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		drm_dbg(&i915->drm, "reloc with multiple write domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		drm_dbg(&i915->drm, "reloc with read/write non-GPU domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	if (reloc->write_domain) {
		target->flags |= EXEC_OBJECT_WRITE;

		/*
		 * Sandybridge PPGTT errata: We need a global gtt mapping
		 * for MI and pipe_control writes because the gpu doesn't
		 * properly redirect them through the ppgtt for non_secure
		 * batchbuffers.
		 */
		if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
		    IS_GEN(eb->i915, 6)) {
			err = i915_vma_bind(target->vma,
					    target->vma->obj->cache_level,
					    PIN_GLOBAL, NULL);
			if (WARN_ONCE(err,
				      "Unexpected failure to bind target VMA!"))
				return err;
		}
	}

	/*
	 * If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (!DBG_FORCE_RELOC &&
	    gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		     ev->vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
		drm_dbg(&i915->drm, "Relocation beyond object bounds: "
			  "target %d offset %d size %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset,
			  (int)ev->vma->size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		drm_dbg(&i915->drm, "Relocation not 4-byte aligned: "
			  "target %d offset %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset);
		return -EINVAL;
	}

	/*
	 * If we write into the object, we need to force the synchronisation
	 * barrier, either with an asynchronous clflush or if we executed the
	 * patching using the GPU (though that should be serialised by the
	 * timeline). To be completely sure, and since we are required to
	 * do relocations we are already stalling, disable the user's opt
	 * out of our synchronisation.
	 */
	ev->flags &= ~EXEC_OBJECT_ASYNC;

	/* and update the user's relocation entry */
	return relocate_entry(ev->vma, reloc, eb, target->vma);
}

static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *urelocs;
	const struct drm_i915_gem_exec_object2 *entry = ev->exec;
	unsigned int remain;

	urelocs = u64_to_user_ptr(entry->relocs_ptr);
	remain = entry->relocation_count;
	if (unlikely(remain > N_RELOC(ULONG_MAX)))
		return -EINVAL;

	/*
	 * We must check that the entire relocation array is safe
	 * to read. However, if the array is not writable the user loses
	 * the updated relocation values.
	 */
	if (unlikely(!access_ok(urelocs, remain*sizeof(*urelocs))))
		return -EFAULT;

	do {
		struct drm_i915_gem_relocation_entry *r = stack;
		unsigned int count =
			min_t(unsigned int, remain, ARRAY_SIZE(stack));
		unsigned int copied;

		/*
		 * This is the fast path and we cannot handle a pagefault
		 * whilst holding the struct mutex lest the user pass in the
		 * relocations contained within a mmaped bo. For in such a case,
		 * the page fault handler would call i915_gem_fault() and
		 * we would try to acquire the struct mutex again. Obviously
		 * this is bad and so lockdep complains vehemently.
		 */
		pagefault_disable();
		copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
		pagefault_enable();
		if (unlikely(copied)) {
			remain = -EFAULT;
			goto out;
		}

		remain -= count;
		do {
			u64 offset = eb_relocate_entry(eb, ev, r);

			if (likely(offset == 0)) {
			} else if ((s64)offset < 0) {
				remain = (int)offset;
				goto out;
			} else {
				/*
				 * Note that reporting an error now
				 * leaves everything in an inconsistent
				 * state as we have *already* changed
				 * the relocation value inside the
				 * object. As we have not changed the
				 * reloc.presumed_offset or will not
				 * change the execobject.offset, on the
				 * call we may not rewrite the value
				 * inside the object, leaving it
				 * dangling and causing a GPU hang. Unless
				 * userspace dynamically rebuilds the
				 * relocations on each execbuf rather than
				 * presume a static tree.
				 *
				 * We did previously check if the relocations
				 * were writable (access_ok), an error now
				 * would be a strange race with mprotect,
				 * having already demonstrated that we
				 * can read from this userspace address.
				 */
				offset = gen8_canonical_addr(offset & ~UPDATE);
				if (unlikely(__put_user(offset, &urelocs[r-stack].presumed_offset))) {
					remain = -EFAULT;
					goto out;
				}
			}
		} while (r++, --count);
		urelocs += ARRAY_SIZE(stack);
	} while (remain);
out:
	reloc_cache_reset(&eb->reloc_cache);
	return remain;
}

static int
eb_relocate_vma_slow(struct i915_execbuffer *eb, struct eb_vma *ev)
{
	const struct drm_i915_gem_exec_object2 *entry = ev->exec;
	struct drm_i915_gem_relocation_entry *relocs =
		u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
	unsigned int i;
	int err;

	for (i = 0; i < entry->relocation_count; i++) {
		u64 offset = eb_relocate_entry(eb, ev, &relocs[i]);

		if ((s64)offset < 0) {
			err = (int)offset;
			goto err;
		}
	}
	err = 0;
err:
	reloc_cache_reset(&eb->reloc_cache);
	return err;
}

static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
{
	const char __user *addr, *end;
	unsigned long size;
	char __maybe_unused c;

	size = entry->relocation_count;
	if (size == 0)
		return 0;

	if (size > N_RELOC(ULONG_MAX))
		return -EINVAL;

	addr = u64_to_user_ptr(entry->relocs_ptr);
	size *= sizeof(struct drm_i915_gem_relocation_entry);
	if (!access_ok(addr, size))
		return -EFAULT;

	end = addr + size;
	for (; addr < end; addr += PAGE_SIZE) {
		int err = __get_user(c, addr);
		if (err)
			return err;
	}
	return __get_user(c, end - 1);
}

static int eb_copy_relocations(const struct i915_execbuffer *eb)
{
	struct drm_i915_gem_relocation_entry *relocs;
	const unsigned int count = eb->buffer_count;
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		const unsigned int nreloc = eb->exec[i].relocation_count;
		struct drm_i915_gem_relocation_entry __user *urelocs;
		unsigned long size;
		unsigned long copied;

		if (nreloc == 0)
			continue;

		err = check_relocations(&eb->exec[i]);
		if (err)
			goto err;

		urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
		size = nreloc * sizeof(*relocs);

		relocs = kvmalloc_array(size, 1, GFP_KERNEL);
		if (!relocs) {
			err = -ENOMEM;
			goto err;
		}

		/* copy_from_user is limited to < 4GiB */
		copied = 0;
		do {
			unsigned int len =
				min_t(u64, BIT_ULL(31), size - copied);

			if (__copy_from_user((char *)relocs + copied,
					     (char __user *)urelocs + copied,
					     len))
				goto end;

			copied += len;
		} while (copied < size);

		/*
		 * As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		if (!user_access_begin(urelocs, size))
			goto end;

		for (copied = 0; copied < nreloc; copied++)
			unsafe_put_user(-1,
					&urelocs[copied].presumed_offset,
					end_user);
		user_access_end();

		eb->exec[i].relocs_ptr = (uintptr_t)relocs;
	}

	return 0;

end_user:
	user_access_end();
end:
	kvfree(relocs);
	err = -EFAULT;
err:
	while (i--) {
		relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
		if (eb->exec[i].relocation_count)
			kvfree(relocs);
	}
	return err;
}

static int eb_prefault_relocations(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;

	for (i = 0; i < count; i++) {
		int err;

		err = check_relocations(&eb->exec[i]);
		if (err)
			return err;
	}

	return 0;
}

static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
{
	struct drm_device *dev = &eb->i915->drm;
	bool have_copy = false;
	struct eb_vma *ev;
	int err = 0;

repeat:
	if (signal_pending(current)) {
		err = -ERESTARTSYS;
		goto out;
	}

	mutex_unlock(&dev->struct_mutex);

	/*
	 * We take 3 passes through the slowpatch.
	 *
	 * 1 - we try to just prefault all the user relocation entries and
	 * then attempt to reuse the atomic pagefault disabled fast path again.
	 *
	 * 2 - we copy the user entries to a local buffer here outside of the
	 * local and allow ourselves to wait upon any rendering before
	 * relocations
	 *
	 * 3 - we already have a local copy of the relocation entries, but
	 * were interrupted (EAGAIN) whilst waiting for the objects, try again.
	 */
	if (!err) {
		err = eb_prefault_relocations(eb);
	} else if (!have_copy) {
		err = eb_copy_relocations(eb);
		have_copy = err == 0;
	} else {
		cond_resched();
		err = 0;
	}
	if (err) {
		mutex_lock(&dev->struct_mutex);
		goto out;
	}

	/* A frequent cause for EAGAIN are currently unavailable client pages */
	flush_workqueue(eb->i915->mm.userptr_wq);

	err = i915_mutex_lock_interruptible(dev);
	if (err) {
		mutex_lock(&dev->struct_mutex);
		goto out;
	}

	GEM_BUG_ON(!eb->batch);

	list_for_each_entry(ev, &eb->relocs, reloc_link) {
		if (!have_copy) {
			pagefault_disable();
			err = eb_relocate_vma(eb, ev);
			pagefault_enable();
			if (err)
				goto repeat;
		} else {
			err = eb_relocate_vma_slow(eb, ev);
			if (err)
				goto err;
		}
	}

	/*
	 * Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	if (err == -EAGAIN)
		goto repeat;

out:
	if (have_copy) {
		const unsigned int count = eb->buffer_count;
		unsigned int i;

		for (i = 0; i < count; i++) {
			const struct drm_i915_gem_exec_object2 *entry =
				&eb->exec[i];
			struct drm_i915_gem_relocation_entry *relocs;

			if (!entry->relocation_count)
				continue;

			relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
			kvfree(relocs);
		}
	}

	return err;
}

static int eb_relocate(struct i915_execbuffer *eb)
{
	int err;

	mutex_lock(&eb->gem_context->mutex);
	err = eb_lookup_vmas(eb);
	mutex_unlock(&eb->gem_context->mutex);
	if (err)
		return err;

	err = eb_reserve(eb);
	if (err)
		return err;

	/* The objects are in their final locations, apply the relocations. */
	if (eb->args->flags & __EXEC_HAS_RELOC) {
		struct eb_vma *ev;

		list_for_each_entry(ev, &eb->relocs, reloc_link) {
			if (eb_relocate_vma(eb, ev))
				return eb_relocate_slow(eb);
		}
	}

	return 0;
}

static int eb_move_to_gpu(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	struct ww_acquire_ctx acquire;
	unsigned int i;
	int err = 0;

	ww_acquire_init(&acquire, &reservation_ww_class);

	for (i = 0; i < count; i++) {
		struct eb_vma *ev = &eb->vma[i];
		struct i915_vma *vma = ev->vma;

		err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
		if (err == -EDEADLK) {
			GEM_BUG_ON(i == 0);
			do {
				int j = i - 1;

1778
				ww_mutex_unlock(&eb->vma[j].vma->resv->lock);
1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791

				swap(eb->vma[i],  eb->vma[j]);
			} while (--i);

			err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
							       &acquire);
		}
		if (err)
			break;
	}
	ww_acquire_done(&acquire);

	while (i--) {
		struct eb_vma *ev = &eb->vma[i];
		struct i915_vma *vma = ev->vma;
		unsigned int flags = ev->flags;
		struct drm_i915_gem_object *obj = vma->obj;

		assert_vma_held(vma);

		if (flags & EXEC_OBJECT_CAPTURE) {
			struct i915_capture_list *capture;

			capture = kmalloc(sizeof(*capture), GFP_KERNEL);
			if (capture) {
				capture->next = eb->request->capture_list;
				capture->vma = vma;
				eb->request->capture_list = capture;
			}
		}

		/*
		 * If the GPU is not _reading_ through the CPU cache, we need
		 * to make sure that any writes (both previous GPU writes from
		 * before a change in snooping levels and normal CPU writes)
		 * caught in that cache are flushed to main memory.
		 *
		 * We want to say
		 *   obj->cache_dirty &&
		 *   !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		 * but gcc's optimiser doesn't handle that as well and emits
		 * two jumps instead of one. Maybe one day...
		 */
		if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) {
			if (i915_gem_clflush_object(obj, 0))
				flags &= ~EXEC_OBJECT_ASYNC;
		}

		if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) {
			err = i915_request_await_object
				(eb->request, obj, flags & EXEC_OBJECT_WRITE);
		}

		if (err == 0)
			err = i915_vma_move_to_active(vma, eb->request, flags);

		i915_vma_unlock(vma);

		__eb_unreserve_vma(vma, flags);
		i915_vma_put(vma);

		ev->vma = NULL;
	}
	ww_acquire_fini(&acquire);

	if (unlikely(err))
		goto err_skip;

	eb->exec = NULL;

	/* Unconditionally flush any chipset caches (for streaming writes). */
	intel_gt_chipset_flush(eb->engine->gt);
	return 0;

err_skip:
	i915_request_set_error_once(eb->request, err);
	return err;
}

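/*
 * Reject execbuffer2 arguments we never accept: unknown flags, leftover
 * DRI1 cliprects (unless reused as a fence array) and misaligned batch
 * start offsets or lengths.
 */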
static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
		return -EINVAL;

	/* Kernel clipping was a DRI1 misfeature */
	if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) {
		if (exec->num_cliprects || exec->cliprects_ptr)
			return -EINVAL;
	}

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return -EINVAL;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return -EINVAL;

	return 0;
}

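/*
 * I915_EXEC_GEN7_SOL_RESET: emit a single MI_LOAD_REGISTER_IMM that zeroes
 * the four GEN7_SO_WRITE_OFFSET registers before the batch runs. Only the
 * gen7 render engine supports this.
 */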
static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
{
	u32 *cs;
	int i;

	if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
		drm_dbg(&rq->i915->drm, "sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	cs = intel_ring_begin(rq, 4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(4);
	for (i = 0; i < 4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
		*cs++ = 0;
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}

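/* Pin the pool object used for the shadow batch into the given address space. */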
static struct i915_vma *
shadow_batch_pin(struct drm_i915_gem_object *obj,
		 struct i915_address_space *vm,
		 unsigned int flags)
{
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		return vma;

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		return ERR_PTR(err);

	return vma;
}

struct eb_parse_work {
	struct dma_fence_work base;
	struct intel_engine_cs *engine;
	struct i915_vma *batch;
	struct i915_vma *shadow;
	struct i915_vma *trampoline;
	unsigned int batch_offset;
	unsigned int batch_length;
};

static int __eb_parse(struct dma_fence_work *work)
{
	struct eb_parse_work *pw = container_of(work, typeof(*pw), base);

	return intel_engine_cmd_parser(pw->engine,
				       pw->batch,
				       pw->batch_offset,
				       pw->batch_length,
				       pw->shadow,
				       pw->trampoline);
}

static void __eb_parse_release(struct dma_fence_work *work)
{
	struct eb_parse_work *pw = container_of(work, typeof(*pw), base);

	if (pw->trampoline)
		i915_active_release(&pw->trampoline->active);
	i915_active_release(&pw->shadow->active);
	i915_active_release(&pw->batch->active);
}

static const struct dma_fence_work_ops eb_parse_ops = {
	.name = "eb_parse",
	.work = __eb_parse,
	.release = __eb_parse_release,
};

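/*
 * Run the command parser asynchronously as a dma_fence_work: the work
 * waits for all writes (including GPU relocations) into the user batch,
 * publishes itself as a shared fence on the batch and as the exclusive
 * fence on the shadow, so execution of the shadow cannot begin until
 * parsing has completed.
 */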
static int eb_parse_pipeline(struct i915_execbuffer *eb,
			     struct i915_vma *shadow,
			     struct i915_vma *trampoline)
{
	struct eb_parse_work *pw;
	int err;

	pw = kzalloc(sizeof(*pw), GFP_KERNEL);
	if (!pw)
		return -ENOMEM;

	err = i915_active_acquire(&eb->batch->vma->active);
	if (err)
		goto err_free;

	err = i915_active_acquire(&shadow->active);
	if (err)
		goto err_batch;

	if (trampoline) {
		err = i915_active_acquire(&trampoline->active);
		if (err)
			goto err_shadow;
	}

	dma_fence_work_init(&pw->base, &eb_parse_ops);

	pw->engine = eb->engine;
	pw->batch = eb->batch->vma;
	pw->batch_offset = eb->batch_start_offset;
	pw->batch_length = eb->batch_len;
	pw->shadow = shadow;
	pw->trampoline = trampoline;

	err = dma_resv_lock_interruptible(pw->batch->resv, NULL);
	if (err)
		goto err_trampoline;

	err = dma_resv_reserve_shared(pw->batch->resv, 1);
	if (err)
		goto err_batch_unlock;

	/* Wait for all writes (and relocs) into the batch to complete */
	err = i915_sw_fence_await_reservation(&pw->base.chain,
					      pw->batch->resv, NULL, false,
					      0, I915_FENCE_GFP);
	if (err < 0)
		goto err_batch_unlock;

	/* Keep the batch alive and unwritten as we parse */
	dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);

	dma_resv_unlock(pw->batch->resv);

	/* Force execution to wait for completion of the parser */
	dma_resv_lock(shadow->resv, NULL);
	dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
	dma_resv_unlock(shadow->resv);

	dma_fence_work_commit(&pw->base);
	return 0;

err_batch_unlock:
	dma_resv_unlock(pw->batch->resv);
err_trampoline:
	if (trampoline)
		i915_active_release(&trampoline->active);
err_shadow:
	i915_active_release(&shadow->active);
err_batch:
	i915_active_release(&eb->batch->vma->active);
err_free:
	kfree(pw);
	return err;
}

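/*
 * Set up the shadow batch whenever the command parser is required: take a
 * buffer from the engine pool, pin it (read-only where the vm allows),
 * add a trampoline mapping for GGTT-based parsing, kick off the
 * asynchronous parse and then substitute the shadow for the user batch.
 */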
static int eb_parse(struct i915_execbuffer *eb)
{
	struct drm_i915_private *i915 = eb->i915;
	struct intel_engine_pool_node *pool;
	struct i915_vma *shadow, *trampoline;
	unsigned int len;
	int err;

	if (!eb_use_cmdparser(eb))
		return 0;

	len = eb->batch_len;
	if (!CMDPARSER_USES_GGTT(eb->i915)) {
		/*
		 * ppGTT backed shadow buffers must be mapped RO, to prevent
		 * post-scan tampering
		 */
		if (!eb->context->vm->has_read_only) {
			drm_dbg(&i915->drm,
				"Cannot prevent post-scan tampering without RO capable vm\n");
			return -EINVAL;
		}
	} else {
		len += I915_CMD_PARSER_TRAMPOLINE_SIZE;
	}

	pool = intel_engine_get_pool(eb->engine, len);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	shadow = shadow_batch_pin(pool->obj, eb->context->vm, PIN_USER);
	if (IS_ERR(shadow)) {
		err = PTR_ERR(shadow);
		goto err;
	}
	i915_gem_object_set_readonly(shadow->obj);

	trampoline = NULL;
	if (CMDPARSER_USES_GGTT(eb->i915)) {
		trampoline = shadow;

		shadow = shadow_batch_pin(pool->obj,
					  &eb->engine->gt->ggtt->vm,
					  PIN_GLOBAL);
		if (IS_ERR(shadow)) {
			err = PTR_ERR(shadow);
			shadow = trampoline;
			goto err_shadow;
		}

		eb->batch_flags |= I915_DISPATCH_SECURE;
	}

	err = eb_parse_pipeline(eb, shadow, trampoline);
	if (err)
		goto err_trampoline;

	eb->vma[eb->buffer_count].vma = i915_vma_get(shadow);
	eb->vma[eb->buffer_count].flags = __EXEC_OBJECT_HAS_PIN;
	eb->batch = &eb->vma[eb->buffer_count++];

	eb->trampoline = trampoline;
	eb->batch_start_offset = 0;

	shadow->private = pool;
	return 0;

err_trampoline:
	if (trampoline)
		i915_vma_unpin(trampoline);
err_shadow:
	i915_vma_unpin(shadow);
err:
	intel_engine_pool_put(pool);
	return err;
}

static void
add_to_client(struct i915_request *rq, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	rq->file_priv = file_priv;

	spin_lock(&file_priv->mm.lock);
	list_add_tail(&rq->client_link, &file_priv->mm.request_list);
	spin_unlock(&file_priv->mm.lock);
}

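/*
 * Final submission: flush the objects to the GPU, perform the optional SOL
 * reset, emit the initial breadcrumb so a waiting batch can be told apart
 * from a running one, then emit the batchbuffer start (plus the trampoline
 * jump if the parser needs it), honouring any no-preemption request.
 */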
static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
{
	int err;

	err = eb_move_to_gpu(eb);
	if (err)
		return err;

	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
		err = i915_reset_gen7_sol_offsets(eb->request);
		if (err)
			return err;
	}

	/*
	 * After we have completed waiting for other engines (using HW
	 * semaphores), we can signal that this request/batch is ready to run.
	 * This allows us to determine whether the batch is still waiting on
	 * the GPU or actually running by checking the breadcrumb.
	 */
	if (eb->engine->emit_init_breadcrumb) {
		err = eb->engine->emit_init_breadcrumb(eb->request);
		if (err)
			return err;
	}

	err = eb->engine->emit_bb_start(eb->request,
					batch->node.start +
					eb->batch_start_offset,
					eb->batch_len,
					eb->batch_flags);
	if (err)
		return err;

	if (eb->trampoline) {
		GEM_BUG_ON(eb->batch_start_offset);
		err = eb->engine->emit_bb_start(eb->request,
						eb->trampoline->node.start +
						eb->batch_len,
						0, 0);
		if (err)
			return err;
	}

	if (intel_context_nopreempt(eb->context))
		__set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);

	return 0;
}

static int num_vcs_engines(const struct drm_i915_private *i915)
{
	return hweight64(INTEL_INFO(i915)->engine_mask &
			 GENMASK_ULL(VCS0 + I915_MAX_VCS - 1, VCS0));
}

/*
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The engine index is returned.
 */
static unsigned int
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
			 struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_engine < 0)
		file_priv->bsd_engine =
			get_random_int() % num_vcs_engines(dev_priv);

	return file_priv->bsd_engine;
}

static const enum intel_engine_id user_ring_map[] = {
	[I915_EXEC_DEFAULT]	= RCS0,
	[I915_EXEC_RENDER]	= RCS0,
	[I915_EXEC_BLT]		= BCS0,
	[I915_EXEC_BSD]		= VCS0,
	[I915_EXEC_VEBOX]	= VECS0
};

static struct i915_request *eb_throttle(struct intel_context *ce)
{
	struct intel_ring *ring = ce->ring;
	struct intel_timeline *tl = ce->timeline;
	struct i915_request *rq;

	/*
	 * Completely unscientific finger-in-the-air estimates for suitable
	 * maximum user request size (to avoid blocking) and then backoff.
	 */
	if (intel_ring_update_space(ring) >= PAGE_SIZE)
		return NULL;

	/*
	 * Find a request that after waiting upon, there will be at least half
	 * the ring available. The hysteresis allows us to compete for the
	 * shared ring and should mean that we sleep less often prior to
	 * claiming our resources, but not so long that the ring completely
	 * drains before we can submit our next request.
	 */
	list_for_each_entry(rq, &tl->requests, link) {
		if (rq->ring != ring)
			continue;

		if (__intel_ring_space(rq->postfix,
				       ring->emit, ring->size) > ring->size / 2)
			break;
	}
	if (&rq->link == &tl->requests)
		return NULL; /* weird, we will check again later for real */

	return i915_request_get(rq);
}

static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
{
	struct intel_timeline *tl;
	struct i915_request *rq;
	int err;

	/*
	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged.
	 */
	err = intel_gt_terminally_wedged(ce->engine->gt);
	if (err)
		return err;

	if (unlikely(intel_context_is_banned(ce)))
		return -EIO;

	/*
	 * Pinning the contexts may generate requests in order to acquire
	 * GGTT space, so do this first before we reserve a seqno for
	 * ourselves.
	 */
	err = intel_context_pin(ce);
	if (err)
		return err;

	/*
	 * Take a local wakeref for preparing to dispatch the execbuf as
	 * we expect to access the hardware fairly frequently in the
	 * process, and require the engine to be kept awake between accesses.
	 * Upon dispatch, we acquire another prolonged wakeref that we hold
	 * until the timeline is idle, which in turn releases the wakeref
	 * taken on the engine, and the parent device.
	 */
	tl = intel_context_timeline_lock(ce);
	if (IS_ERR(tl)) {
		err = PTR_ERR(tl);
		goto err_unpin;
	}

	intel_context_enter(ce);
	rq = eb_throttle(ce);

	intel_context_timeline_unlock(tl);

	if (rq) {
		bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
		long timeout;

		timeout = MAX_SCHEDULE_TIMEOUT;
		if (nonblock)
			timeout = 0;

		timeout = i915_request_wait(rq,
					    I915_WAIT_INTERRUPTIBLE,
					    timeout);
		i915_request_put(rq);

		if (timeout < 0) {
			err = nonblock ? -EWOULDBLOCK : timeout;
			goto err_exit;
		}
	}

	eb->engine = ce->engine;
	eb->context = ce;
	return 0;

err_exit:
	mutex_lock(&tl->mutex);
	intel_context_exit(ce);
	intel_context_timeline_unlock(tl);
err_unpin:
	intel_context_unpin(ce);
	return err;
}

static void eb_unpin_engine(struct i915_execbuffer *eb)
{
	struct intel_context *ce = eb->context;
	struct intel_timeline *tl = ce->timeline;

	mutex_lock(&tl->mutex);
	intel_context_exit(ce);
	mutex_unlock(&tl->mutex);

	intel_context_unpin(ce);
}

static unsigned int
eb_select_legacy_ring(struct i915_execbuffer *eb,
		      struct drm_file *file,
		      struct drm_i915_gem_execbuffer2 *args)
{
	struct drm_i915_private *i915 = eb->i915;
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;

	if (user_ring_id != I915_EXEC_BSD &&
	    (args->flags & I915_EXEC_BSD_MASK)) {
		drm_dbg(&i915->drm,
			"execbuf with non bsd ring but with invalid "
			"bsd dispatch flags: %d\n", (int)(args->flags));
		return -1;
	}

	if (user_ring_id == I915_EXEC_BSD && num_vcs_engines(i915) > 1) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
			bsd_idx = gen8_dispatch_bsd_engine(i915, file);
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
			bsd_idx--;
		} else {
			drm_dbg(&i915->drm,
				"execbuf with unknown bsd ring: %u\n",
				bsd_idx);
			return -1;
		}

		return _VCS(bsd_idx);
	}

	if (user_ring_id >= ARRAY_SIZE(user_ring_map)) {
		drm_dbg(&i915->drm, "execbuf with unknown ring: %u\n",
			user_ring_id);
		return -1;
	}

	return user_ring_map[user_ring_id];
}

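/*
 * Resolve the user's engine selection, either an index into the context's
 * engine map or a legacy ring id, and pin that context for the duration of
 * the execbuf.
 */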
static int
eb_pin_engine(struct i915_execbuffer *eb,
	      struct drm_file *file,
	      struct drm_i915_gem_execbuffer2 *args)
{
	struct intel_context *ce;
	unsigned int idx;
	int err;

	if (i915_gem_context_user_engines(eb->gem_context))
		idx = args->flags & I915_EXEC_RING_MASK;
	else
		idx = eb_select_legacy_ring(eb, file, args);

	ce = i915_gem_context_get_engine(eb->gem_context, idx);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = __eb_pin_engine(eb, ce);
	intel_context_put(ce);

	return err;
}

static void
__free_fence_array(struct drm_syncobj **fences, unsigned int n)
{
	while (n--)
		drm_syncobj_put(ptr_mask_bits(fences[n], 2));
	kvfree(fences);
}

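/*
 * With I915_EXEC_FENCE_ARRAY, cliprects_ptr/num_cliprects are reused to
 * pass an array of drm_i915_gem_exec_fence. Copy it in, look up each
 * syncobj and stash the wait/signal flags in the low bits of the pointer.
 */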
static struct drm_syncobj **
get_fence_array(struct drm_i915_gem_execbuffer2 *args,
		struct drm_file *file)
{
	const unsigned long nfences = args->num_cliprects;
	struct drm_i915_gem_exec_fence __user *user;
	struct drm_syncobj **fences;
	unsigned long n;
	int err;

	if (!(args->flags & I915_EXEC_FENCE_ARRAY))
		return NULL;

	/* Check multiplication overflow for access_ok() and kvmalloc_array() */
	BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
	if (nfences > min_t(unsigned long,
			    ULONG_MAX / sizeof(*user),
			    SIZE_MAX / sizeof(*fences)))
		return ERR_PTR(-EINVAL);

	user = u64_to_user_ptr(args->cliprects_ptr);
	if (!access_ok(user, nfences * sizeof(*user)))
		return ERR_PTR(-EFAULT);

	fences = kvmalloc_array(nfences, sizeof(*fences),
				__GFP_NOWARN | GFP_KERNEL);
	if (!fences)
		return ERR_PTR(-ENOMEM);

	for (n = 0; n < nfences; n++) {
		struct drm_i915_gem_exec_fence fence;
		struct drm_syncobj *syncobj;

		if (__copy_from_user(&fence, user++, sizeof(fence))) {
			err = -EFAULT;
			goto err;
		}

		if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
			err = -EINVAL;
			goto err;
		}

		syncobj = drm_syncobj_find(file, fence.handle);
		if (!syncobj) {
			DRM_DEBUG("Invalid syncobj handle provided\n");
			err = -ENOENT;
			goto err;
		}

		BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
			     ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);

		fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
	}

	return fences;

err:
	__free_fence_array(fences, n);
	return ERR_PTR(err);
}

static void
put_fence_array(struct drm_i915_gem_execbuffer2 *args,
		struct drm_syncobj **fences)
{
	if (fences)
		__free_fence_array(fences, args->num_cliprects);
}

static int
await_fence_array(struct i915_execbuffer *eb,
		  struct drm_syncobj **fences)
{
	const unsigned int nfences = eb->args->num_cliprects;
	unsigned int n;
	int err;

	for (n = 0; n < nfences; n++) {
		struct drm_syncobj *syncobj;
		struct dma_fence *fence;
		unsigned int flags;

		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
		if (!(flags & I915_EXEC_FENCE_WAIT))
			continue;

		fence = drm_syncobj_fence_get(syncobj);
		if (!fence)
			return -EINVAL;

		err = i915_request_await_dma_fence(eb->request, fence);
		dma_fence_put(fence);
		if (err < 0)
			return err;
	}

	return 0;
}

static void
signal_fence_array(struct i915_execbuffer *eb,
		   struct drm_syncobj **fences)
{
	const unsigned int nfences = eb->args->num_cliprects;
	struct dma_fence * const fence = &eb->request->fence;
	unsigned int n;

	for (n = 0; n < nfences; n++) {
		struct drm_syncobj *syncobj;
		unsigned int flags;

		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
		if (!(flags & I915_EXEC_FENCE_SIGNAL))
			continue;

		drm_syncobj_replace_fence(syncobj, fence);
	}
}

static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
{
	struct i915_request *rq, *rn;

	list_for_each_entry_safe(rq, rn, &tl->requests, link)
		if (rq == end || !i915_request_retire(rq))
			break;
}

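/*
 * Commit and queue the request under the timeline mutex: inherit the
 * context's scheduling attributes (with boosts for new clients and for
 * requests that did not rely on semaphores), skip the request if the
 * context was closed in the meantime, then retire what we can from the
 * client's timeline.
 */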
static void eb_request_add(struct i915_execbuffer *eb)
{
	struct i915_request *rq = eb->request;
	struct intel_timeline * const tl = i915_request_timeline(rq);
	struct i915_sched_attr attr = {};
	struct i915_request *prev;

	lockdep_assert_held(&tl->mutex);
	lockdep_unpin_lock(&tl->mutex, rq->cookie);

	trace_i915_request_add(rq);

	prev = __i915_request_commit(rq);

	/* Check that the context wasn't destroyed before submission */
	if (likely(rcu_access_pointer(eb->context->gem_context))) {
		attr = eb->gem_context->sched;

		/*
		 * Boost actual workloads past semaphores!
		 *
		 * With semaphores we spin on one engine waiting for another,
		 * simply to reduce the latency of starting our work when
		 * the signaler completes. However, if there is any other
		 * work that we could be doing on this engine instead, that
		 * is better utilisation and will reduce the overall duration
		 * of the current work. To avoid PI boosting a semaphore
		 * far in the distance past over useful work, we keep a history
		 * of any semaphore use along our dependency chain.
		 */
		if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
			attr.priority |= I915_PRIORITY_NOSEMAPHORE;

		/*
		 * Boost priorities to new clients (new request flows).
		 *
		 * Allow interactive/synchronous clients to jump ahead of
		 * the bulk clients. (FQ_CODEL)
		 */
		if (list_empty(&rq->sched.signalers_list))
			attr.priority |= I915_PRIORITY_WAIT;
	} else {
		/* Serialise with context_close via the add_to_timeline */
		i915_request_set_error_once(rq, -ENOENT);
		__i915_request_skip(rq);
	}

	local_bh_disable();
	__i915_request_queue(rq, &attr);
	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */

	/* Try to clean up the client's timeline after submitting the request */
	if (prev)
		retire_requests(tl, prev);

	mutex_unlock(&tl->mutex);
}

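/*
 * The heart of execbuf: gather the vmas, relocate, optionally run the
 * command parser, build and queue a request, and report any fences back to
 * userspace. Roughly, userspace reaches this path with something like the
 * following (illustrative only, error handling and buffer setup omitted):
 *
 *	struct drm_i915_gem_exec_object2 obj[2] = {};
 *	struct drm_i915_gem_execbuffer2 eb = {
 *		.buffers_ptr = (uintptr_t)obj,
 *		.buffer_count = 2,
 *		.batch_len = 4096,
 *		.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb);
 */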
static int
i915_gem_do_execbuffer(struct drm_device *dev,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec,
		       struct drm_syncobj **fences)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct i915_execbuffer eb;
	struct dma_fence *in_fence = NULL;
	struct dma_fence *exec_fence = NULL;
	struct sync_file *out_fence = NULL;
	struct i915_vma *batch;
	int out_fence_fd = -1;
	int err;

	BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS);
	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);

	eb.i915 = i915;
	eb.file = file;
	eb.args = args;
	if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
		args->flags |= __EXEC_HAS_RELOC;

	eb.exec = exec;
	eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
	eb.vma[0].vma = NULL;

	eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	reloc_cache_init(&eb.reloc_cache, eb.i915);

	eb.buffer_count = args->buffer_count;
	eb.batch_start_offset = args->batch_start_offset;
	eb.batch_len = args->batch_len;
	eb.trampoline = NULL;

	eb.batch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (INTEL_GEN(i915) >= 11)
			return -ENODEV;

		/* Return -EPERM to trigger fallback code on old binaries. */
		if (!HAS_SECURE_BATCHES(i915))
			return -EPERM;

		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		eb.batch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		eb.batch_flags |= I915_DISPATCH_PINNED;

	if (args->flags & I915_EXEC_FENCE_IN) {
		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!in_fence)
			return -EINVAL;
	}

	if (args->flags & I915_EXEC_FENCE_SUBMIT) {
		if (in_fence) {
			err = -EINVAL;
			goto err_in_fence;
		}

		exec_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!exec_fence) {
			err = -EINVAL;
			goto err_in_fence;
		}
	}

	if (args->flags & I915_EXEC_FENCE_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			err = out_fence_fd;
			goto err_exec_fence;
		}
	}

	err = eb_create(&eb);
	if (err)
		goto err_out_fence;

	GEM_BUG_ON(!eb.lut_size);

	err = eb_select_context(&eb);
	if (unlikely(err))
		goto err_destroy;

	err = eb_pin_engine(&eb, file, args);
	if (unlikely(err))
		goto err_context;

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto err_engine;

	err = eb_relocate(&eb);
	if (err) {
		/*
		 * If the user expects the execobject.offset and
		 * reloc.presumed_offset to be an exact match,
		 * as for using NO_RELOC, then we cannot update
		 * the execobject.offset until we have completed
		 * relocation.
		 */
		args->flags &= ~__EXEC_HAS_RELOC;
		goto err_vma;
	}

	if (unlikely(eb.batch->flags & EXEC_OBJECT_WRITE)) {
		drm_dbg(&i915->drm,
			"Attempting to use self-modifying batch buffer\n");
		err = -EINVAL;
		goto err_vma;
	}

	if (range_overflows_t(u64,
			      eb.batch_start_offset, eb.batch_len,
			      eb.batch->vma->size)) {
		drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
		err = -EINVAL;
		goto err_vma;
	}

	if (eb.batch_len == 0)
		eb.batch_len = eb.batch->vma->size - eb.batch_start_offset;

	err = eb_parse(&eb);
	if (err)
		goto err_vma;

	/*
	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again.
	 */
	batch = eb.batch->vma;
	if (eb.batch_flags & I915_DISPATCH_SECURE) {
		struct i915_vma *vma;

		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		vma = i915_gem_object_ggtt_pin(batch->obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_parse;
		}

		batch = vma;
	}

	/* All GPU relocation batches must be submitted prior to the user rq */
	GEM_BUG_ON(eb.reloc_cache.rq);

	/* Allocate a request for this batch buffer nice and early. */
	eb.request = i915_request_create(eb.context);
	if (IS_ERR(eb.request)) {
		err = PTR_ERR(eb.request);
		goto err_batch_unpin;
	}

	if (in_fence) {
		err = i915_request_await_dma_fence(eb.request, in_fence);
		if (err < 0)
			goto err_request;
	}

	if (exec_fence) {
		err = i915_request_await_execution(eb.request, exec_fence,
						   eb.engine->bond_execute);
		if (err < 0)
			goto err_request;
	}

	if (fences) {
		err = await_fence_array(&eb, fences);
		if (err)
			goto err_request;
	}

	if (out_fence_fd != -1) {
		out_fence = sync_file_create(&eb.request->fence);
		if (!out_fence) {
			err = -ENOMEM;
			goto err_request;
		}
	}

	/*
	 * Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	eb.request->batch = batch;
	if (batch->private)
		intel_engine_pool_mark_active(batch->private, eb.request);

	trace_i915_request_queue(eb.request, eb.batch_flags);
	err = eb_submit(&eb, batch);
err_request:
	add_to_client(eb.request, file);
	i915_request_get(eb.request);
	eb_request_add(&eb);

	if (fences)
		signal_fence_array(&eb, fences);

	if (out_fence) {
		if (err == 0) {
			fd_install(out_fence_fd, out_fence->file);
			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
			args->rsvd2 |= (u64)out_fence_fd << 32;
			out_fence_fd = -1;
		} else {
			fput(out_fence->file);
		}
	}
	i915_request_put(eb.request);

err_batch_unpin:
	if (eb.batch_flags & I915_DISPATCH_SECURE)
		i915_vma_unpin(batch);
err_parse:
	if (batch->private)
		intel_engine_pool_put(batch->private);
err_vma:
	if (eb.exec)
		eb_release_vmas(&eb);
	if (eb.trampoline)
		i915_vma_unpin(eb.trampoline);
	mutex_unlock(&dev->struct_mutex);
err_engine:
	eb_unpin_engine(&eb);
err_context:
	i915_gem_context_put(eb.gem_context);
err_destroy:
	eb_destroy(&eb);
err_out_fence:
	if (out_fence_fd != -1)
		put_unused_fd(out_fence_fd);
err_exec_fence:
	dma_fence_put(exec_fence);
err_in_fence:
	dma_fence_put(in_fence);
	return err;
}

static size_t eb_element_size(void)
{
	return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma);
}

static bool check_buffer_count(size_t count)
{
	const size_t sz = eb_element_size();

	/*
	 * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup
	 * array size (see eb_create()). Otherwise, we can accept an array as
	 * large as can be addressed (though use large arrays at your peril)!
	 */

	return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1);
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	const size_t count = args->buffer_count;
	unsigned int i;
	int err;

	if (!check_buffer_count(count)) {
		drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
		return -EINVAL;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	err = i915_gem_check_execbuffer(&exec2);
	if (err)
		return err;

	/* Copy in the exec list from userland */
	exec_list = kvmalloc_array(count, sizeof(*exec_list),
				   __GFP_NOWARN | GFP_KERNEL);
	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
	if (exec_list == NULL || exec2_list == NULL) {
		drm_dbg(&i915->drm,
			"Failed to allocate exec list for %d buffers\n",
			args->buffer_count);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -ENOMEM;
	}
	err = copy_from_user(exec_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * count);
	if (err) {
		drm_dbg(&i915->drm, "copy %d exec entries failed %d\n",
			args->buffer_count, err);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_GEN(to_i915(dev)) < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list, NULL);
	if (exec2.flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
			exec2_list[i].offset &= PIN_OFFSET_MASK;
			if (__copy_to_user(&user_exec_list[i].offset,
					   &exec2_list[i].offset,
					   sizeof(user_exec_list[i].offset)))
				break;
		}
	}

	kvfree(exec_list);
	kvfree(exec2_list);
	return err;
}

int
i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list;
	struct drm_syncobj **fences = NULL;
	const size_t count = args->buffer_count;
	int err;

	if (!check_buffer_count(count)) {
		drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
		return -EINVAL;
	}

	err = i915_gem_check_execbuffer(args);
	if (err)
		return err;

	/* Allocate an extra slot for use by the command parser */
	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
	if (exec2_list == NULL) {
		drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n",
			count);
		return -ENOMEM;
	}
	if (copy_from_user(exec2_list,
			   u64_to_user_ptr(args->buffers_ptr),
			   sizeof(*exec2_list) * count)) {
		drm_dbg(&i915->drm, "copy %zd exec entries failed\n", count);
		kvfree(exec2_list);
		return -EFAULT;
	}

	if (args->flags & I915_EXEC_FENCE_ARRAY) {
		fences = get_fence_array(args, file);
		if (IS_ERR(fences)) {
			kvfree(exec2_list);
			return PTR_ERR(fences);
		}
	}

	err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences);

	/*
	 * Now that we have begun execution of the batchbuffer, we ignore
	 * any new error after this point. Also given that we have already
	 * updated the associated relocations, we try to write out the current
	 * object locations irrespective of any error.
	 */
	if (args->flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);
		unsigned int i;

		/* Copy the new buffer offsets back to the user's exec list. */
		/*
		 * Note: count * sizeof(*user_exec_list) does not overflow,
		 * because we checked 'count' in check_buffer_count().
		 *
		 * And this range already got effectively checked earlier
		 * when we did the "copy_from_user()" above.
		 */
		if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list)))
			goto end;

		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
			unsafe_put_user(exec2_list[i].offset,
					&user_exec_list[i].offset,
					end_user);
		}
end_user:
		user_access_end();
end:;
	}

	args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
	put_fence_array(args, fences);
	kvfree(exec2_list);
	return err;
}