/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <linux/dma_remapping.h>
#include <linux/reservation.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drmP.h>
#include <drm/drm_syncobj.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

enum {
	FORCE_CPU_RELOC = 1,
	FORCE_GTT_RELOC,
	FORCE_GPU_RELOC,
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
};

#define __EXEC_OBJECT_HAS_REF		BIT(31)
#define __EXEC_OBJECT_HAS_PIN		BIT(30)
#define __EXEC_OBJECT_HAS_FENCE		BIT(29)
#define __EXEC_OBJECT_NEEDS_MAP		BIT(28)
#define __EXEC_OBJECT_NEEDS_BIAS	BIT(27)
#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 27) /* all of the above */
#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)

#define __EXEC_HAS_RELOC	BIT(31)
#define __EXEC_VALIDATED	BIT(30)
#define __EXEC_INTERNAL_FLAGS	(~0u << 30)
#define UPDATE			PIN_OFFSET_FIXED

#define BATCH_OFFSET_BIAS (256*1024)

#define __I915_EXEC_ILLEGAL_FLAGS \
	(__I915_EXEC_UNKNOWN_FLAGS | I915_EXEC_CONSTANTS_MASK)

/**
 * DOC: User command execution
 *
 * Userspace submits commands to be executed on the GPU as an instruction
 * stream within a GEM object we call a batchbuffer. These instructions may
 * refer to other GEM objects containing auxiliary state such as kernels,
 * samplers, render targets and even secondary batchbuffers. Userspace does
 * not know where in the GPU memory these objects reside and so before the
 * batchbuffer is passed to the GPU for execution, those addresses in the
 * batchbuffer and auxiliary objects are updated. This is known as relocation,
 * or patching. To try and avoid having to relocate each object on the next
 * execution, userspace is told the location of those objects in this pass,
 * but this remains just a hint as the kernel may choose a new location for
 * any object in the future.
 *
 * At the level of talking to the hardware, submitting a batchbuffer for the
 * GPU to execute amounts to adding content to a buffer from which the HW
 * command streamer is reading.
 *
 * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e.
 *    Execlists, this command is not placed on the same buffer as the
 *    remaining items.
 *
 * 2. Add a command to invalidate caches to the buffer.
 *
 * 3. Add a batchbuffer start command to the buffer; the start command is
 *    essentially a token together with the GPU address of the batchbuffer
 *    to be executed.
 *
 * 4. Add a pipeline flush to the buffer.
 *
 * 5. Add a memory write command to the buffer to record when the GPU
 *    is done executing the batchbuffer. The memory write writes the
 *    global sequence number of the request, ``i915_request::global_seqno``;
 *    the i915 driver uses the current value in the register to determine
 *    if the GPU has completed the batchbuffer.
 *
 * 6. Add a user interrupt command to the buffer. This command instructs
 *    the GPU to issue an interrupt when the command, pipeline flush and
 *    memory write are completed.
 *
 * 7. Inform the hardware of the additional commands added to the buffer
 *    (by updating the tail pointer).
 *
 * Processing an execbuf ioctl is conceptually split up into a few phases.
 *
 * 1. Validation - Ensure all the pointers, handles and flags are valid.
 * 2. Reservation - Assign GPU address space for every object
 * 3. Relocation - Update any addresses to point to the final locations
 * 4. Serialisation - Order the request with respect to its dependencies
 * 5. Construction - Construct a request to execute the batchbuffer
 * 6. Submission (at some point in the future execution)
 *
 * Reserving resources for the execbuf is the most complicated phase. We
 * neither want to have to migrate the object in the address space, nor do
 * we want to have to update any relocations pointing to this object. Ideally,
 * we want to leave the object where it is and for all the existing relocations
 * to match. If the object is given a new address, or if userspace thinks the
 * object is elsewhere, we have to parse all the relocation entries and update
 * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
 * all the target addresses in all of its objects match the value in the
 * relocation entries and that they all match the presumed offsets given by the
 * list of execbuffer objects. Using this knowledge, we know that if we haven't
 * moved any buffers, all the relocation entries are valid and we can skip
 * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
 * hang.) The requirements for using I915_EXEC_NO_RELOC are:
 *
 *      The addresses written in the objects must match the corresponding
 *      reloc.presumed_offset which in turn must match the corresponding
 *      execobject.offset.
 *
 *      Any render targets written to in the batch must be flagged with
 *      EXEC_OBJECT_WRITE.
 *
 *      To avoid stalling, execobject.offset should match the current
 *      address of that object within the active context.
 *
 * The reservation is done in multiple phases. First we try to keep any
 * object already bound in its current location - so long as it meets the
 * constraints imposed by the new execbuffer. Any object left unbound after the
 * first pass is then fitted into any available idle space. If an object does
 * not fit, all objects are removed from the reservation and the process rerun
 * after sorting the objects into a priority order (more difficult to fit
 * objects are tried first). Failing that, the entire VM is cleared and we try
 * to fit the execbuf one last time before concluding that it simply will not
 * fit.
 *
 * A small complication to all of this is that we allow userspace not only to
 * specify an alignment and a size for the object in the address space, but
 * we also allow userspace to specify the exact offset. These objects are
 * simpler to place (the location is known a priori); all we have to do is make
 * sure the space is available.
 *
 * Once all the objects are in place, patching up the buried pointers to point
 * to the final locations is a fairly simple job of walking over the relocation
 * entry arrays, looking up the right address and rewriting the value into
 * the object. Simple! ... The relocation entries are stored in user memory
 * and so to access them we have to copy them into a local buffer. That copy
 * has to avoid taking any pagefaults as they may lead back to a GEM object
 * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
 * the relocation into multiple passes. First we try to do everything within an
 * atomic context (avoid the pagefaults) which requires that we never wait. If
 * we detect that we may wait, or if we need to fault, then we have to fallback
 * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
 * bells yet?) Dropping the mutex means that we lose all the state we have
 * built up so far for the execbuf and we must reset any global data. However,
 * we do leave the objects pinned in their final locations - which is a
 * potential issue for concurrent execbufs. Once we have left the mutex, we can
 * allocate and copy all the relocation entries into a large array at our
 * leisure, reacquire the mutex, reclaim all the objects and other state and
 * then proceed to update any incorrect addresses with the objects.
 *
 * As we process the relocation entries, we maintain a record of whether the
 * object is being written to. Using NORELOC, we expect userspace to provide
 * this information instead. We also check whether we can skip the relocation
 * by comparing the expected value inside the relocation entry with the target's
 * final address. If they differ, we have to map the current object and rewrite
 * the 4 or 8 byte pointer within.
 *
 * Serialising an execbuf is quite simple according to the rules of the GEM
 * ABI. Execution within each context is ordered by the order of submission.
 * Writes to any GEM object are in order of submission and are exclusive. Reads
 * from a GEM object are unordered with respect to other reads, but ordered by
 * writes. A write submitted after a read cannot occur before the read, and
 * similarly any read submitted after a write cannot occur before the write.
 * Writes are ordered between engines such that only one write occurs at any
 * time (completing any reads beforehand) - using semaphores where available
 * and CPU serialisation otherwise. Other GEM accesses obey the same rules: any
 * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
 * reads before starting, and any read (either using set-domain or pread) must
 * flush all GPU writes before starting. (Note we only employ a barrier before,
 * we currently rely on userspace not concurrently starting a new execution
 * whilst reading or writing to an object. This may be an advantage or not
 * depending on how much you trust userspace not to shoot themselves in the
 * foot.) Serialisation may just result in the request being inserted into
 * a DAG awaiting its turn, but the simplest approach is to wait on the CPU until
 * all dependencies are resolved.
 *
 * After all of that, it is just a matter of closing the request and handing it to
 * the hardware (well, leaving it in a queue to be executed). However, we also
 * offer the ability for batchbuffers to be run with elevated privileges so
 * that they can access otherwise hidden registers. (Used to adjust L3 cache etc.)
 * Before any batch is given extra privileges we first must check that it
 * contains no nefarious instructions: we check that each instruction is from
 * our whitelist and all registers are also from an allowed list. We first
 * copy the user's batchbuffer to a shadow (so that the user doesn't have
 * access to it, either by the CPU or GPU as we scan it) and then parse each
 * instruction. If everything is ok, we set a flag telling the hardware to run
 * the batchbuffer in trusted mode, otherwise the ioctl is rejected.
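 *
 * As a very rough sketch of the userspace half of this contract (the handle,
 * offset and size variables below are illustrative placeholders only, not
 * taken from this file), a batch whose presumed offsets are already up to
 * date might be submitted as::
 *
 *	struct drm_i915_gem_exec_object2 obj[2] = {
 *		{ .handle = target_handle, .offset = presumed_target_offset },
 *		{ .handle = batch_handle, .offset = presumed_batch_offset },
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (uintptr_t)obj,
 *		.buffer_count = 2,
 *		.batch_len = batch_size,
 *		.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC |
 *			 I915_EXEC_HANDLE_LUT,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *
 * With I915_EXEC_NO_RELOC set, the kernel assumes every address already
 * written into the objects matches the execobject.offset supplied above and
 * only falls back to processing the relocation entries if an object moves.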
 */

struct i915_execbuffer {
	struct drm_i915_private *i915; /** i915 backpointer */
	struct drm_file *file; /** per-file lookup tables and limits */
	struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
	struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
	struct i915_vma **vma;
	unsigned int *flags;

	struct intel_engine_cs *engine; /** engine to queue the request to */
	struct i915_gem_context *ctx; /** context for building the request */
	struct i915_address_space *vm; /** GTT and vma for the request */

	struct i915_request *request; /** our request to build */
	struct i915_vma *batch; /** identity of the batch obj/vma */

	/** actual size of execobj[] as we may extend it for the cmdparser */
	unsigned int buffer_count;

	/** list of vma not yet bound during reservation phase */
	struct list_head unbound;

	/** list of vma that have execobj.relocation_count */
	struct list_head relocs;

	/**
	 * Track the most recently used object for relocations, as we
	 * frequently have to perform multiple relocations within the same
	 * obj/page
	 */
	struct reloc_cache {
		struct drm_mm_node node; /** temporary GTT binding */
		unsigned long vaddr; /** Current kmap address */
		unsigned long page; /** Currently mapped page index */
		unsigned int gen; /** Cached value of INTEL_GEN */
		bool use_64bit_reloc : 1;
		bool has_llc : 1;
		bool has_fence : 1;
		bool needs_unfenced : 1;

		struct i915_request *rq;
		u32 *rq_cmd;
		unsigned int rq_size;
	} reloc_cache;

	u64 invalid_flags; /** Set of execobj.flags that are invalid */
	u32 context_flags; /** Set of execobj.flags to insert from the ctx */

	u32 batch_start_offset; /** Location within object of batch */
	u32 batch_len; /** Length of batch within object */
	u32 batch_flags; /** Flags composed for emit_bb_start() */

	/**
	 * Indicate either the size of the hashtable used to resolve
	 * relocation handles, or if negative that we are using a direct
	 * index into the execobj[].
	 */
	int lut_size;
	struct hlist_head *buckets; /** ht for relocation handles */
};

#define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags])

/*
 * Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
#define GEN8_HIGH_ADDRESS_BIT 47
static inline u64 gen8_canonical_addr(u64 address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline u64 gen8_noncanonical_addr(u64 address)
{
	return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
}

static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
	return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len;
}

static int eb_create(struct i915_execbuffer *eb)
{
	if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
		unsigned int size = 1 + ilog2(eb->buffer_count);

		/*
		 * Without a 1:1 association between relocation handles and
		 * the execobject[] index, we instead create a hashtable.
		 * We size it dynamically based on available memory, starting
		 * first with 1:1 associative hash and scaling back until
		 * the allocation succeeds.
		 *
		 * Later on we use a positive lut_size to indicate we are
		 * using this hashtable, and a negative value to indicate a
		 * direct lookup.
		 */
		do {
			gfp_t flags;

			/* While we can still reduce the allocation size, don't
			 * raise a warning and allow the allocation to fail.
			 * On the last pass though, we want to try as hard
			 * as possible to perform the allocation and warn
			 * if it fails.
			 */
			flags = GFP_KERNEL;
			if (size > 1)
				flags |= __GFP_NORETRY | __GFP_NOWARN;

			eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
					      flags);
			if (eb->buckets)
				break;
		} while (--size);

		if (unlikely(!size))
			return -ENOMEM;

		eb->lut_size = size;
	} else {
		eb->lut_size = -eb->buffer_count;
	}

	return 0;
}

static bool
eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
		 const struct i915_vma *vma,
		 unsigned int flags)
{
	if (vma->node.size < entry->pad_to_size)
		return true;

	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
		return true;

	if (flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	if (flags & __EXEC_OBJECT_NEEDS_MAP &&
	    !i915_vma_is_map_and_fenceable(vma))
		return true;

	return false;
}

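/*
 * Try to pin the vma in its existing (or requested) location without evicting
 * anything, taking a fence register if one is required. Returns false if the
 * vma could not be pinned there or is misplaced and so must go through the
 * full reservation path.
 */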
static inline bool
eb_pin_vma(struct i915_execbuffer *eb,
	   const struct drm_i915_gem_exec_object2 *entry,
	   struct i915_vma *vma)
{
	unsigned int exec_flags = *vma->exec_flags;
	u64 pin_flags;

	if (vma->node.size)
		pin_flags = vma->node.start;
	else
		pin_flags = entry->offset & PIN_OFFSET_MASK;

	pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_GTT))
		pin_flags |= PIN_GLOBAL;

	if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
		return false;

	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
		if (unlikely(i915_vma_pin_fence(vma))) {
			i915_vma_unpin(vma);
			return false;
		}

		if (vma->fence)
			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
	return !eb_vma_misplaced(entry, vma, exec_flags);
}

static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
{
	GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));

	if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
		__i915_vma_unpin_fence(vma);

	__i915_vma_unpin(vma);
}

static inline void
eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags)
{
	if (!(*flags & __EXEC_OBJECT_HAS_PIN))
		return;

	__eb_unreserve_vma(vma, *flags);
	*flags &= ~__EXEC_OBJECT_RESERVED;
}

static int
eb_validate_vma(struct i915_execbuffer *eb,
		struct drm_i915_gem_exec_object2 *entry,
		struct i915_vma *vma)
{
	if (unlikely(entry->flags & eb->invalid_flags))
		return -EINVAL;

	if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
		return -EINVAL;

	/*
	 * Offset can be used as input (EXEC_OBJECT_PINNED), reject
	 * any non-page-aligned or non-canonical addresses.
	 */
	if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
		     entry->offset != gen8_canonical_addr(entry->offset & PAGE_MASK)))
		return -EINVAL;

	/* pad_to_size was once a reserved field, so sanitize it */
	if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
		if (unlikely(offset_in_page(entry->pad_to_size)))
			return -EINVAL;
	} else {
		entry->pad_to_size = 0;
	}

	if (unlikely(vma->exec_flags)) {
		DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n",
			  entry->handle, (int)(entry - eb->exec));
		return -EINVAL;
	}

	/*
	 * From drm_mm perspective address space is continuous,
	 * so from this point we're always using non-canonical
	 * form internally.
	 */
	entry->offset = gen8_noncanonical_addr(entry->offset);

	if (!eb->reloc_cache.has_fence) {
		entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
	} else {
		if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
		     eb->reloc_cache.needs_unfenced) &&
		    i915_gem_object_is_tiled(vma->obj))
			entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
	}

	if (!(entry->flags & EXEC_OBJECT_PINNED))
		entry->flags |= eb->context_flags;

	return 0;
}

static int
eb_add_vma(struct i915_execbuffer *eb,
	   unsigned int i, unsigned batch_idx,
	   struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
	int err;

	GEM_BUG_ON(i915_vma_is_closed(vma));

	if (!(eb->args->flags & __EXEC_VALIDATED)) {
		err = eb_validate_vma(eb, entry, vma);
		if (unlikely(err))
			return err;
	}

	if (eb->lut_size > 0) {
		vma->exec_handle = entry->handle;
		hlist_add_head(&vma->exec_node,
			       &eb->buckets[hash_32(entry->handle,
						    eb->lut_size)]);
	}

	if (entry->relocation_count)
		list_add_tail(&vma->reloc_link, &eb->relocs);

	/*
	 * Stash a pointer from the vma to execobj, so we can query its flags,
	 * size, alignment etc as provided by the user. Also we stash a pointer
	 * to the vma inside the execobj so that we can use a direct lookup
	 * to find the right target VMA when doing relocations.
	 */
	eb->vma[i] = vma;
	eb->flags[i] = entry->flags;
	vma->exec_flags = &eb->flags[i];

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if (i == batch_idx) {
		if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
			eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
		if (eb->reloc_cache.has_fence)
			eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;

		eb->batch = vma;
	}

	err = 0;
	if (eb_pin_vma(eb, entry, vma)) {
		if (entry->offset != vma->node.start) {
			entry->offset = vma->node.start | UPDATE;
			eb->args->flags |= __EXEC_HAS_RELOC;
		}
	} else {
		eb_unreserve_vma(vma, vma->exec_flags);

		list_add_tail(&vma->exec_link, &eb->unbound);
		if (drm_mm_node_allocated(&vma->node))
			err = i915_vma_unbind(vma);
		if (unlikely(err))
			vma->exec_flags = NULL;
	}
	return err;
}

static inline int use_cpu_reloc(const struct reloc_cache *cache,
				const struct drm_i915_gem_object *obj)
{
	if (!i915_gem_object_has_struct_page(obj))
		return false;

	if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
		return true;

	if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
		return false;

	return (cache->has_llc ||
		obj->cache_dirty ||
		obj->cache_level != I915_CACHE_NONE);
}

static int eb_reserve_vma(const struct i915_execbuffer *eb,
			  struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
	unsigned int exec_flags = *vma->exec_flags;
	u64 pin_flags;
	int err;

	pin_flags = PIN_USER | PIN_NONBLOCK;
	if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
		pin_flags |= PIN_GLOBAL;

	/*
	 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
	 * limit address to the first 4GBs for unflagged objects.
	 */
	if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
		pin_flags |= PIN_ZONE_4G;

	if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
		pin_flags |= PIN_MAPPABLE;

	if (exec_flags & EXEC_OBJECT_PINNED) {
		pin_flags |= entry->offset | PIN_OFFSET_FIXED;
		pin_flags &= ~PIN_NONBLOCK; /* force overlapping checks */
	} else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) {
		pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
	}

	err = i915_vma_pin(vma,
			   entry->pad_to_size, entry->alignment,
			   pin_flags);
	if (err)
		return err;

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start | UPDATE;
		eb->args->flags |= __EXEC_HAS_RELOC;
	}

	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
		err = i915_vma_pin_fence(vma);
		if (unlikely(err)) {
			i915_vma_unpin(vma);
			return err;
		}

		if (vma->fence)
			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
	GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags));

	return 0;
}

static int eb_reserve(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	struct list_head last;
	struct i915_vma *vma;
	unsigned int i, pass;
	int err;

	/*
	 * Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */

	pass = 0;
	err = 0;
	do {
		list_for_each_entry(vma, &eb->unbound, exec_link) {
			err = eb_reserve_vma(eb, vma);
			if (err)
				break;
		}
		if (err != -ENOSPC)
			return err;

		/* Resort *all* the objects into priority order */
		INIT_LIST_HEAD(&eb->unbound);
		INIT_LIST_HEAD(&last);
		for (i = 0; i < count; i++) {
			unsigned int flags = eb->flags[i];
			struct i915_vma *vma = eb->vma[i];

			if (flags & EXEC_OBJECT_PINNED &&
			    flags & __EXEC_OBJECT_HAS_PIN)
				continue;

			eb_unreserve_vma(vma, &eb->flags[i]);

			if (flags & EXEC_OBJECT_PINNED)
				list_add(&vma->exec_link, &eb->unbound);
			else if (flags & __EXEC_OBJECT_NEEDS_MAP)
				list_add_tail(&vma->exec_link, &eb->unbound);
			else
				list_add_tail(&vma->exec_link, &last);
		}
		list_splice_tail(&last, &eb->unbound);

		switch (pass++) {
		case 0:
			break;

		case 1:
			/* Too fragmented, unbind everything and retry */
			err = i915_gem_evict_vm(eb->vm);
			if (err)
				return err;
			break;

		default:
			return -ENOSPC;
		}
	} while (1);
}

static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
{
	if (eb->args->flags & I915_EXEC_BATCH_FIRST)
		return 0;
	else
		return eb->buffer_count - 1;
}

static int eb_select_context(struct i915_execbuffer *eb)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
	if (unlikely(!ctx))
		return -ENOENT;

	eb->ctx = ctx;
	eb->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &eb->i915->ggtt.vm;

	eb->context_flags = 0;
	if (ctx->flags & CONTEXT_NO_ZEROMAP)
		eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return 0;
}

static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
	struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
	struct drm_i915_gem_object *obj;
	unsigned int i, batch;
	int err;

	if (unlikely(i915_gem_context_is_closed(eb->ctx)))
		return -ENOENT;

	if (unlikely(i915_gem_context_is_banned(eb->ctx)))
		return -EIO;

	INIT_LIST_HEAD(&eb->relocs);
	INIT_LIST_HEAD(&eb->unbound);

	batch = eb_batch_index(eb);

	for (i = 0; i < eb->buffer_count; i++) {
		u32 handle = eb->exec[i].handle;
		struct i915_lut_handle *lut;
		struct i915_vma *vma;

		vma = radix_tree_lookup(handles_vma, handle);
		if (likely(vma))
			goto add_vma;

		obj = i915_gem_object_lookup(eb->file, handle);
		if (unlikely(!obj)) {
			err = -ENOENT;
			goto err_vma;
		}

		vma = i915_vma_instance(obj, eb->vm, NULL);
		if (unlikely(IS_ERR(vma))) {
			err = PTR_ERR(vma);
			goto err_obj;
		}

		lut = kmem_cache_alloc(eb->i915->luts, GFP_KERNEL);
		if (unlikely(!lut)) {
			err = -ENOMEM;
			goto err_obj;
		}

		err = radix_tree_insert(handles_vma, handle, vma);
		if (unlikely(err)) {
			kmem_cache_free(eb->i915->luts, lut);
			goto err_obj;
		}

		/* transfer ref to ctx */
		if (!vma->open_count++)
			i915_vma_reopen(vma);
		list_add(&lut->obj_link, &obj->lut_list);
		list_add(&lut->ctx_link, &eb->ctx->handles_list);
		lut->ctx = eb->ctx;
		lut->handle = handle;

add_vma:
		err = eb_add_vma(eb, i, batch, vma);
		if (unlikely(err))
			goto err_vma;

		GEM_BUG_ON(vma != eb->vma[i]);
		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
		GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
			   eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
	}

	eb->args->flags |= __EXEC_VALIDATED;
	return eb_reserve(eb);

err_obj:
	i915_gem_object_put(obj);
err_vma:
	eb->vma[i] = NULL;
	return err;
}

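/*
 * Look up the target vma for a relocation handle: a direct index into the
 * execobject array when userspace passed I915_EXEC_HANDLE_LUT (lut_size < 0),
 * otherwise a search of the handle hashtable.
 */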
static struct i915_vma *
eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
{
	if (eb->lut_size < 0) {
		if (handle >= -eb->lut_size)
			return NULL;
		return eb->vma[handle];
	} else {
		struct hlist_head *head;
		struct i915_vma *vma;

		head = &eb->buckets[hash_32(handle, eb->lut_size)];
		hlist_for_each_entry(vma, head, exec_node) {
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void eb_release_vmas(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;

	for (i = 0; i < count; i++) {
		struct i915_vma *vma = eb->vma[i];
		unsigned int flags = eb->flags[i];

		if (!vma)
			break;

		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
		vma->exec_flags = NULL;
		eb->vma[i] = NULL;

		if (flags & __EXEC_OBJECT_HAS_PIN)
			__eb_unreserve_vma(vma, flags);

		if (flags & __EXEC_OBJECT_HAS_REF)
			i915_vma_put(vma);
	}
}

static void eb_reset_vmas(const struct i915_execbuffer *eb)
{
	eb_release_vmas(eb);
	if (eb->lut_size > 0)
		memset(eb->buckets, 0,
		       sizeof(struct hlist_head) << eb->lut_size);
}

static void eb_destroy(const struct i915_execbuffer *eb)
{
	GEM_BUG_ON(eb->reloc_cache.rq);

	if (eb->lut_size > 0)
		kfree(eb->buckets);
}

static inline u64
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
		  const struct i915_vma *target)
{
	return gen8_canonical_addr((int)reloc->delta + target->node.start);
}

static void reloc_cache_init(struct reloc_cache *cache,
			     struct drm_i915_private *i915)
{
	cache->page = -1;
	cache->vaddr = 0;
	/* Must be a variable in the struct to allow GCC to unroll. */
	cache->gen = INTEL_GEN(i915);
	cache->has_llc = HAS_LLC(i915);
	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
	cache->has_fence = cache->gen < 4;
	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
	cache->node.allocated = false;
	cache->rq = NULL;
	cache->rq_size = 0;
}

static inline void *unmask_page(unsigned long p)
{
	return (void *)(uintptr_t)(p & PAGE_MASK);
}

static inline unsigned int unmask_flags(unsigned long p)
{
	return p & ~PAGE_MASK;
}

#define KMAP 0x4 /* after CLFLUSH_FLAGS */

static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
{
	struct drm_i915_private *i915 =
		container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
	return &i915->ggtt;
}

static void reloc_gpu_flush(struct reloc_cache *cache)
{
	GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
	cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(cache->rq->batch->obj);
	i915_gem_chipset_flush(cache->rq->i915);

	i915_request_add(cache->rq);
	cache->rq = NULL;
}

static void reloc_cache_reset(struct reloc_cache *cache)
{
	void *vaddr;

	if (cache->rq)
		reloc_gpu_flush(cache);

	if (!cache->vaddr)
		return;

	vaddr = unmask_page(cache->vaddr);
	if (cache->vaddr & KMAP) {
		if (cache->vaddr & CLFLUSH_AFTER)
			mb();

		kunmap_atomic(vaddr);
		i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
	} else {
		wmb();
		io_mapping_unmap_atomic((void __iomem *)vaddr);
		if (cache->node.allocated) {
			struct i915_ggtt *ggtt = cache_to_ggtt(cache);

			ggtt->vm.clear_range(&ggtt->vm,
					     cache->node.start,
					     cache->node.size);
			drm_mm_remove_node(&cache->node);
		} else {
			i915_vma_unpin((struct i915_vma *)cache->node.mm);
		}
	}

	cache->vaddr = 0;
	cache->page = -1;
}

static void *reloc_kmap(struct drm_i915_gem_object *obj,
			struct reloc_cache *cache,
			unsigned long page)
{
	void *vaddr;

	if (cache->vaddr) {
		kunmap_atomic(unmask_page(cache->vaddr));
	} else {
		unsigned int flushes;
		int err;

		err = i915_gem_obj_prepare_shmem_write(obj, &flushes);
		if (err)
			return ERR_PTR(err);

		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);

		cache->vaddr = flushes | KMAP;
		cache->node.mm = (void *)obj;
		if (flushes)
			mb();
	}

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
	cache->page = page;

	return vaddr;
}

static void *reloc_iomap(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 unsigned long page)
{
	struct i915_ggtt *ggtt = cache_to_ggtt(cache);
	unsigned long offset;
	void *vaddr;

	if (cache->vaddr) {
		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
	} else {
		struct i915_vma *vma;
		int err;

		if (use_cpu_reloc(cache, obj))
			return NULL;

		err = i915_gem_object_set_to_gtt_domain(obj, true);
		if (err)
			return ERR_PTR(err);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE |
					       PIN_NONBLOCK |
					       PIN_NONFAULT);
		if (IS_ERR(vma)) {
			memset(&cache->node, 0, sizeof(cache->node));
			err = drm_mm_insert_node_in_range
				(&ggtt->vm.mm, &cache->node,
				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
				 0, ggtt->mappable_end,
				 DRM_MM_INSERT_LOW);
			if (err) /* no inactive aperture space, use cpu reloc */
				return NULL;
		} else {
			err = i915_vma_put_fence(vma);
			if (err) {
				i915_vma_unpin(vma);
				return ERR_PTR(err);
			}

			cache->node.start = vma->node.start;
			cache->node.mm = (void *)vma;
		}
	}

	offset = cache->node.start;
	if (cache->node.allocated) {
		wmb();
		ggtt->vm.insert_page(&ggtt->vm,
				     i915_gem_object_get_dma_address(obj, page),
				     offset, I915_CACHE_NONE, 0);
	} else {
		offset += page << PAGE_SHIFT;
	}

	vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
							 offset);
	cache->page = page;
	cache->vaddr = (unsigned long)vaddr;

	return vaddr;
}

static void *reloc_vaddr(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 unsigned long page)
{
	void *vaddr;

	if (cache->page == page) {
		vaddr = unmask_page(cache->vaddr);
	} else {
		vaddr = NULL;
		if ((cache->vaddr & KMAP) == 0)
			vaddr = reloc_iomap(obj, cache, page);
		if (!vaddr)
			vaddr = reloc_kmap(obj, cache, page);
	}

	return vaddr;
}

static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
{
	if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
		if (flushes & CLFLUSH_BEFORE) {
			clflushopt(addr);
			mb();
		}

		*addr = value;

		/*
		 * Writes to the same cacheline are serialised by the CPU
		 * (including clflush). On the write path, we only require
		 * that it hits memory in an orderly fashion and place
		 * mb barriers at the start and end of the relocation phase
		 * to ensure ordering of clflush wrt the system.
		 */
		if (flushes & CLFLUSH_AFTER)
			clflushopt(addr);
	} else
		*addr = value;
}

static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
			     struct i915_vma *vma,
			     unsigned int len)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	struct drm_i915_gem_object *obj;
	struct i915_request *rq;
	struct i915_vma *batch;
	u32 *cmd;
	int err;

	GEM_BUG_ON(vma->obj->write_domain & I915_GEM_DOMAIN_CPU);

	obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map(obj,
				      cache->has_llc ?
				      I915_MAP_FORCE_WB :
				      I915_MAP_FORCE_WC);
	i915_gem_object_unpin_pages(obj);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	err = i915_gem_object_set_to_wc_domain(obj, false);
	if (err)
		goto err_unmap;

	batch = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_unmap;
	}

	err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
	if (err)
		goto err_unmap;

	rq = i915_request_alloc(eb->engine, eb->ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = i915_request_await_object(rq, vma->obj, true);
	if (err)
		goto err_request;

	err = eb->engine->emit_bb_start(rq,
					batch->node.start, PAGE_SIZE,
					cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
	if (err)
		goto err_request;

	GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
	i915_vma_move_to_active(batch, rq, 0);
	reservation_object_lock(batch->resv, NULL);
	reservation_object_add_excl_fence(batch->resv, &rq->fence);
	reservation_object_unlock(batch->resv);
	i915_vma_unpin(batch);

	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	reservation_object_lock(vma->resv, NULL);
	reservation_object_add_excl_fence(vma->resv, &rq->fence);
	reservation_object_unlock(vma->resv);

	rq->batch = batch;

	cache->rq = rq;
	cache->rq_cmd = cmd;
	cache->rq_size = 0;

	/* Return with batch mapping (cmd) still pinned */
	return 0;

err_request:
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(batch);
err_unmap:
	i915_gem_object_unpin_map(obj);
	return err;
}

static u32 *reloc_gpu(struct i915_execbuffer *eb,
		      struct i915_vma *vma,
		      unsigned int len)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	u32 *cmd;

	if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1))
		reloc_gpu_flush(cache);

	if (unlikely(!cache->rq)) {
		int err;

		/* If we need to copy for the cmdparser, we will stall anyway */
		if (eb_use_cmdparser(eb))
			return ERR_PTR(-EWOULDBLOCK);

		if (!intel_engine_can_store_dword(eb->engine))
			return ERR_PTR(-ENODEV);

		err = __reloc_gpu_alloc(eb, vma, len);
		if (unlikely(err))
			return ERR_PTR(err);
	}

	cmd = cache->rq_cmd + cache->rq_size;
	cache->rq_size += len;

	return cmd;
}

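/*
 * Write the relocation value (the target's GPU address) into the object at
 * reloc->offset, emitting a GPU MI_STORE_DWORD_IMM batch when the object is
 * still in use by the GPU (or when GPU relocations are forced), and falling
 * back to a CPU write through the reloc_cache otherwise.
 */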
static u64
relocate_entry(struct i915_vma *vma,
	       const struct drm_i915_gem_relocation_entry *reloc,
	       struct i915_execbuffer *eb,
	       const struct i915_vma *target)
{
	u64 offset = reloc->offset;
	u64 target_offset = relocation_target(reloc, target);
	bool wide = eb->reloc_cache.use_64bit_reloc;
	void *vaddr;

	if (!eb->reloc_cache.vaddr &&
	    (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
	     !reservation_object_test_signaled_rcu(vma->resv, true))) {
		const unsigned int gen = eb->reloc_cache.gen;
		unsigned int len;
		u32 *batch;
		u64 addr;

		if (wide)
			len = offset & 7 ? 8 : 5;
		else if (gen >= 4)
			len = 4;
		else
			len = 3;

		batch = reloc_gpu(eb, vma, len);
		if (IS_ERR(batch))
			goto repeat;

		addr = gen8_canonical_addr(vma->node.start + offset);
		if (wide) {
			if (offset & 7) {
				*batch++ = MI_STORE_DWORD_IMM_GEN4;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = lower_32_bits(target_offset);

				addr = gen8_canonical_addr(addr + 4);

				*batch++ = MI_STORE_DWORD_IMM_GEN4;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = upper_32_bits(target_offset);
			} else {
				*batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = lower_32_bits(target_offset);
				*batch++ = upper_32_bits(target_offset);
			}
		} else if (gen >= 6) {
			*batch++ = MI_STORE_DWORD_IMM_GEN4;
			*batch++ = 0;
			*batch++ = addr;
			*batch++ = target_offset;
		} else if (gen >= 4) {
			*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
			*batch++ = 0;
			*batch++ = addr;
			*batch++ = target_offset;
		} else {
			*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*batch++ = addr;
			*batch++ = target_offset;
		}

		goto out;
	}

repeat:
	vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	clflush_write32(vaddr + offset_in_page(offset),
			lower_32_bits(target_offset),
			eb->reloc_cache.vaddr);

	if (wide) {
		offset += sizeof(u32);
		target_offset >>= 32;
		wide = false;
		goto repeat;
	}

out:
	return target->node.start | UPDATE;
}

static u64
eb_relocate_entry(struct i915_execbuffer *eb,
		  struct i915_vma *vma,
		  const struct drm_i915_gem_relocation_entry *reloc)
{
	struct i915_vma *target;
	int err;

	/* we already hold a reference to all valid objects */
	target = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(!target))
		return -ENOENT;

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	if (reloc->write_domain) {
		*target->exec_flags |= EXEC_OBJECT_WRITE;

		/*
		 * Sandybridge PPGTT errata: We need a global gtt mapping
		 * for MI and pipe_control writes because the gpu doesn't
		 * properly redirect them through the ppgtt for non_secure
		 * batchbuffers.
		 */
		if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
		    IS_GEN6(eb->i915)) {
			err = i915_vma_bind(target, target->obj->cache_level,
					    PIN_GLOBAL);
			if (WARN_ONCE(err,
				      "Unexpected failure to bind target VMA!"))
				return err;
		}
	}

	/*
	 * If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (!DBG_FORCE_RELOC &&
	    gen8_canonical_addr(target->node.start) == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		     vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "target %d offset %d size %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset,
			  (int)vma->size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "target %d offset %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset);
		return -EINVAL;
	}

	/*
	 * If we write into the object, we need to force the synchronisation
	 * barrier, either with an asynchronous clflush or if we executed the
	 * patching using the GPU (though that should be serialised by the
	 * timeline). To be completely sure, and since performing the
	 * relocations means we are already stalling, disable the user's opt
	 * out of our synchronisation.
	 */
	*vma->exec_flags &= ~EXEC_OBJECT_ASYNC;

	/* and update the user's relocation entry */
	return relocate_entry(vma, reloc, eb, target);
}

static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *urelocs;
	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
	unsigned int remain;

	urelocs = u64_to_user_ptr(entry->relocs_ptr);
	remain = entry->relocation_count;
	if (unlikely(remain > N_RELOC(ULONG_MAX)))
		return -EINVAL;

	/*
	 * We must check that the entire relocation array is safe
	 * to read. However, if the array is not writable the user loses
	 * the updated relocation values.
	 */
	if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs))))
		return -EFAULT;

	do {
		struct drm_i915_gem_relocation_entry *r = stack;
		unsigned int count =
			min_t(unsigned int, remain, ARRAY_SIZE(stack));
		unsigned int copied;

		/*
		 * This is the fast path and we cannot handle a pagefault
		 * whilst holding the struct mutex lest the user pass in the
		 * relocations contained within a mmaped bo. In such a case,
		 * the page fault handler would call i915_gem_fault() and we
		 * would try to acquire the struct mutex again. Obviously this
		 * is bad and so lockdep complains vehemently.
		 */
		pagefault_disable();
		copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
		pagefault_enable();
		if (unlikely(copied)) {
			remain = -EFAULT;
			goto out;
		}

		remain -= count;
		do {
			u64 offset = eb_relocate_entry(eb, vma, r);

			if (likely(offset == 0)) {
			} else if ((s64)offset < 0) {
				remain = (int)offset;
				goto out;
			} else {
				/*
				 * Note that reporting an error now
				 * leaves everything in an inconsistent
				 * state as we have *already* changed
				 * the relocation value inside the
				 * object. As we have not changed the
				 * reloc.presumed_offset or will not
				 * change the execobject.offset, on the
				 * call we may not rewrite the value
				 * inside the object, leaving it
				 * dangling and causing a GPU hang. Unless
				 * userspace dynamically rebuilds the
				 * relocations on each execbuf rather than
				 * presume a static tree.
				 *
				 * We did previously check if the relocations
				 * were writable (access_ok), an error now
				 * would be a strange race with mprotect,
				 * having already demonstrated that we
				 * can read from this userspace address.
				 */
				offset = gen8_canonical_addr(offset & ~UPDATE);
				__put_user(offset,
					   &urelocs[r-stack].presumed_offset);
			}
		} while (r++, --count);
		urelocs += ARRAY_SIZE(stack);
	} while (remain);
out:
	reloc_cache_reset(&eb->reloc_cache);
	return remain;
}

static int
eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma)
{
	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
	struct drm_i915_gem_relocation_entry *relocs =
		u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
	unsigned int i;
	int err;

	for (i = 0; i < entry->relocation_count; i++) {
		u64 offset = eb_relocate_entry(eb, vma, &relocs[i]);

		if ((s64)offset < 0) {
			err = (int)offset;
			goto err;
		}
	}
	err = 0;
err:
	reloc_cache_reset(&eb->reloc_cache);
	return err;
}

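/*
 * Probe the user-supplied relocation array: validate its size and touch
 * every page to check it is readable, so that the later pagefault-disabled
 * copies are unlikely to fault.
 */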
static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
{
	const char __user *addr, *end;
	unsigned long size;
	char __maybe_unused c;

	size = entry->relocation_count;
	if (size == 0)
		return 0;

	if (size > N_RELOC(ULONG_MAX))
		return -EINVAL;

	addr = u64_to_user_ptr(entry->relocs_ptr);
	size *= sizeof(struct drm_i915_gem_relocation_entry);
	if (!access_ok(VERIFY_READ, addr, size))
		return -EFAULT;

	end = addr + size;
	for (; addr < end; addr += PAGE_SIZE) {
		int err = __get_user(c, addr);
		if (err)
			return err;
	}
	return __get_user(c, end - 1);
}

static int eb_copy_relocations(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		const unsigned int nreloc = eb->exec[i].relocation_count;
		struct drm_i915_gem_relocation_entry __user *urelocs;
		struct drm_i915_gem_relocation_entry *relocs;
		unsigned long size;
		unsigned long copied;

		if (nreloc == 0)
			continue;

		err = check_relocations(&eb->exec[i]);
		if (err)
			goto err;

		urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
		size = nreloc * sizeof(*relocs);

		relocs = kvmalloc_array(size, 1, GFP_KERNEL);
		if (!relocs) {
			kvfree(relocs);
			err = -ENOMEM;
			goto err;
		}

		/* copy_from_user is limited to < 4GiB */
		copied = 0;
		do {
			unsigned int len =
				min_t(u64, BIT_ULL(31), size - copied);

			if (__copy_from_user((char *)relocs + copied,
					     (char __user *)urelocs + copied,
					     len)) {
				kvfree(relocs);
				err = -EFAULT;
				goto err;
			}

			copied += len;
		} while (copied < size);

		/*
		 * As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		user_access_begin();
		for (copied = 0; copied < nreloc; copied++)
			unsafe_put_user(-1,
					&urelocs[copied].presumed_offset,
					end_user);
end_user:
		user_access_end();

		eb->exec[i].relocs_ptr = (uintptr_t)relocs;
	}

	return 0;

err:
	while (i--) {
		struct drm_i915_gem_relocation_entry *relocs =
			u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
		if (eb->exec[i].relocation_count)
			kvfree(relocs);
	}
	return err;
}

static int eb_prefault_relocations(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;

	if (unlikely(i915_modparams.prefault_disable))
		return 0;

	for (i = 0; i < count; i++) {
		int err;

		err = check_relocations(&eb->exec[i]);
		if (err)
			return err;
	}

	return 0;
}

static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
{
	struct drm_device *dev = &eb->i915->drm;
	bool have_copy = false;
	struct i915_vma *vma;
	int err = 0;

repeat:
	if (signal_pending(current)) {
		err = -ERESTARTSYS;
		goto out;
	}

	/* We may process another execbuffer during the unlock... */
	eb_reset_vmas(eb);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * We take 3 passes through the slowpath.
	 *
	 * 1 - we try to just prefault all the user relocation entries and
	 * then attempt to reuse the atomic pagefault disabled fast path again.
	 *
	 * 2 - we copy the user entries to a local buffer here outside of the
	 * mutex and allow ourselves to wait upon any rendering before
	 * relocations
	 *
	 * 3 - we already have a local copy of the relocation entries, but
	 * were interrupted (EAGAIN) whilst waiting for the objects, try again.
	 */
	if (!err) {
		err = eb_prefault_relocations(eb);
	} else if (!have_copy) {
		err = eb_copy_relocations(eb);
		have_copy = err == 0;
	} else {
		cond_resched();
		err = 0;
	}
	if (err) {
		mutex_lock(&dev->struct_mutex);
		goto out;
	}

	/* A frequent cause for EAGAIN are currently unavailable client pages */
	flush_workqueue(eb->i915->mm.userptr_wq);

	err = i915_mutex_lock_interruptible(dev);
	if (err) {
		mutex_lock(&dev->struct_mutex);
		goto out;
	}

	/* reacquire the objects */
	err = eb_lookup_vmas(eb);
	if (err)
		goto err;

	GEM_BUG_ON(!eb->batch);

	list_for_each_entry(vma, &eb->relocs, reloc_link) {
		if (!have_copy) {
			pagefault_disable();
			err = eb_relocate_vma(eb, vma);
			pagefault_enable();
			if (err)
				goto repeat;
		} else {
			err = eb_relocate_vma_slow(eb, vma);
			if (err)
				goto err;
		}
	}

	/*
	 * Leave the user relocations as they are; this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	if (err == -EAGAIN)
		goto repeat;

out:
	if (have_copy) {
		const unsigned int count = eb->buffer_count;
		unsigned int i;

		for (i = 0; i < count; i++) {
			const struct drm_i915_gem_exec_object2 *entry =
				&eb->exec[i];
			struct drm_i915_gem_relocation_entry *relocs;

			if (!entry->relocation_count)
				continue;

			relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
			kvfree(relocs);
		}
	}

1740
	return err;
1741 1742
}

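/*
 * Fast path: look up the vmas and, if the execbuffer carries relocations,
 * apply them in place. Any failure falls back to eb_relocate_slow() above.
 */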
static int eb_relocate(struct i915_execbuffer *eb)
{
	if (eb_lookup_vmas(eb))
		goto slow;

	/* The objects are in their final locations, apply the relocations. */
	if (eb->args->flags & __EXEC_HAS_RELOC) {
		struct i915_vma *vma;

		list_for_each_entry(vma, &eb->relocs, reloc_link) {
			if (eb_relocate_vma(eb, vma))
				goto slow;
		}
	}

	return 0;

slow:
	return eb_relocate_slow(eb);
}

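/*
 * Publish the request's fence in the object's reservation object so that
 * other users wait upon our rendering: exclusively for writes, shared for
 * reads.
 */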
static void eb_export_fence(struct i915_vma *vma,
			    struct i915_request *rq,
			    unsigned int flags)
{
	struct reservation_object *resv = vma->resv;

	/*
	 * Ignore errors from failing to allocate the new fence, we can't
	 * handle an error right now. Worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
	reservation_object_lock(resv, NULL);
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &rq->fence);
	else if (reservation_object_reserve_shared(resv) == 0)
		reservation_object_add_shared_fence(resv, &rq->fence);
	reservation_object_unlock(resv);
}

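/*
 * Serialise the request against every object in the execbuf: flush CPU
 * caches where needed, wait upon prior rendering (unless ASYNC), then mark
 * each vma as active and export the request fence before dropping our
 * reservations.
 */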
static int eb_move_to_gpu(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		unsigned int flags = eb->flags[i];
		struct i915_vma *vma = eb->vma[i];
		struct drm_i915_gem_object *obj = vma->obj;

		if (flags & EXEC_OBJECT_CAPTURE) {
			struct i915_capture_list *capture;

			capture = kmalloc(sizeof(*capture), GFP_KERNEL);
			if (unlikely(!capture))
				return -ENOMEM;

			capture->next = eb->request->capture_list;
			capture->vma = eb->vma[i];
			eb->request->capture_list = capture;
		}

		/*
		 * If the GPU is not _reading_ through the CPU cache, we need
		 * to make sure that any writes (both previous GPU writes from
		 * before a change in snooping levels and normal CPU writes)
		 * caught in that cache are flushed to main memory.
		 *
		 * We want to say
		 *   obj->cache_dirty &&
		 *   !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		 * but gcc's optimiser doesn't handle that as well and emits
		 * two jumps instead of one. Maybe one day...
		 */
		if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) {
			if (i915_gem_clflush_object(obj, 0))
				flags &= ~EXEC_OBJECT_ASYNC;
		}

		if (flags & EXEC_OBJECT_ASYNC)
			continue;

		err = i915_request_await_object
			(eb->request, obj, flags & EXEC_OBJECT_WRITE);
		if (err)
			return err;
	}

	for (i = 0; i < count; i++) {
		unsigned int flags = eb->flags[i];
		struct i915_vma *vma = eb->vma[i];

		i915_vma_move_to_active(vma, eb->request, flags);
		eb_export_fence(vma, eb->request, flags);

		__eb_unreserve_vma(vma, flags);
		vma->exec_flags = NULL;

		if (unlikely(flags & __EXEC_OBJECT_HAS_REF))
			i915_vma_put(vma);
	}
	eb->exec = NULL;

	/* Unconditionally flush any chipset caches (for streaming writes). */
	i915_gem_chipset_flush(eb->i915);

	return 0;
}

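/*
 * Sanity check the userspace execbuffer2 arguments: reject unknown flags,
 * DRI1-era cliprects (unless reused as a fence array) and misaligned
 * batch offsets/lengths.
 */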
static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
		return false;

	/* Kernel clipping was a DRI1 misfeature */
	if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) {
		if (exec->num_cliprects || exec->cliprects_ptr)
			return false;
	}

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return false;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return false;

	return true;
}

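/*
 * Track the vma (and its object) as active on the request's engine, update
 * the object's GPU read/write domains and record any frontbuffer or fence
 * activity alongside the request.
 */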
void i915_vma_move_to_active(struct i915_vma *vma,
			     struct i915_request *rq,
			     unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	const unsigned int idx = rq->engine->id;

	lockdep_assert_held(&rq->i915->drm.struct_mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	/*
	 * Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we
	 * add the active reference first and queue for it to be dropped
	 * *last*.
	 */
	if (!i915_vma_is_active(vma))
		obj->active_count++;
	i915_vma_set_active(vma, idx);
	i915_gem_active_set(&vma->last_read[idx], rq);
	list_move_tail(&vma->vm_link, &vma->vm->active_list);

	obj->write_domain = 0;
	if (flags & EXEC_OBJECT_WRITE) {
		obj->write_domain = I915_GEM_DOMAIN_RENDER;

		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
			i915_gem_active_set(&obj->frontbuffer_write, rq);

		obj->read_domains = 0;
	}
	obj->read_domains |= I915_GEM_GPU_DOMAINS;

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		i915_gem_active_set(&vma->last_fence, rq);
}

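/*
 * I915_EXEC_GEN7_SOL_RESET: emit MI_LOAD_REGISTER_IMM to zero the four
 * GEN7_SO_WRITE_OFFSET registers ahead of the batch; only valid on the
 * gen7 render engine.
 */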
static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
{
	u32 *cs;
	int i;

	if (!IS_GEN7(rq->i915) || rq->engine->id != RCS) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	cs = intel_ring_begin(rq, 4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(4);
	for (i = 0; i < 4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
		*cs++ = 0;
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}

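/*
 * Copy the batch into a shadow buffer from the engine's batch pool and run
 * it through the command parser. A NULL return means an unhandled chained
 * batch was found and the original batch should be used instead.
 */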
static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
{
	struct drm_i915_gem_object *shadow_batch_obj;
	struct i915_vma *vma;
	int err;

	shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
						   PAGE_ALIGN(eb->batch_len));
	if (IS_ERR(shadow_batch_obj))
		return ERR_CAST(shadow_batch_obj);

	err = intel_engine_cmd_parser(eb->engine,
				      eb->batch->obj,
				      shadow_batch_obj,
				      eb->batch_start_offset,
				      eb->batch_len,
				      is_master);
	if (err) {
		if (err == -EACCES) /* unhandled chained batch */
			vma = NULL;
		else
			vma = ERR_PTR(err);
		goto out;
	}

	vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		goto out;

	eb->vma[eb->buffer_count] = i915_vma_get(vma);
	eb->flags[eb->buffer_count] =
		__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
	vma->exec_flags = &eb->flags[eb->buffer_count];
	eb->buffer_count++;

out:
	i915_gem_object_unpin_pages(shadow_batch_obj);
	return vma;
}

static void
add_to_client(struct i915_request *rq, struct drm_file *file)
{
	rq->file_priv = file->driver_priv;
	list_add_tail(&rq->client_link, &rq->file_priv->mm.request_list);
}

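/*
 * Emit the batchbuffer: serialise against all objects, apply the optional
 * gen7 SOL reset, then ask the engine to start the batch.
 */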
static int eb_submit(struct i915_execbuffer *eb)
{
	int err;

	err = eb_move_to_gpu(eb);
	if (err)
		return err;

	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
		err = i915_reset_gen7_sol_offsets(eb->request);
		if (err)
			return err;
	}

	err = eb->engine->emit_bb_start(eb->request,
					eb->batch->node.start +
					eb->batch_start_offset,
					eb->batch_len,
					eb->batch_flags);
	if (err)
		return err;

	return 0;
}

/*
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The engine index is returned.
 */
static unsigned int
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
			 struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_engine < 0)
		file_priv->bsd_engine = atomic_fetch_xor(1,
			 &dev_priv->mm.bsd_engine_dispatch_index);

	return file_priv->bsd_engine;
}

#define I915_USER_RINGS (4)

static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
	[I915_EXEC_DEFAULT]	= RCS,
	[I915_EXEC_RENDER]	= RCS,
	[I915_EXEC_BLT]		= BCS,
	[I915_EXEC_BSD]		= VCS,
	[I915_EXEC_VEBOX]	= VECS
};

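/*
 * Map the I915_EXEC_RING selector in the execbuf flags onto an engine,
 * alternating I915_EXEC_BSD submissions between the two video engines
 * where a second BSD ring is available.
 */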
static struct intel_engine_cs *
eb_select_engine(struct drm_i915_private *dev_priv,
		 struct drm_file *file,
		 struct drm_i915_gem_execbuffer2 *args)
{
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
	struct intel_engine_cs *engine;

	if (user_ring_id > I915_USER_RINGS) {
		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
		return NULL;
	}

	if ((user_ring_id != I915_EXEC_BSD) &&
	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
		DRM_DEBUG("execbuf with non bsd ring but with invalid "
			  "bsd dispatch flags: %d\n", (int)(args->flags));
		return NULL;
	}

	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
			bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
			bsd_idx--;
		} else {
			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
				  bsd_idx);
			return NULL;
		}

		engine = dev_priv->engine[_VCS(bsd_idx)];
	} else {
		engine = dev_priv->engine[user_ring_map[user_ring_id]];
	}

	if (!engine) {
		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
		return NULL;
	}

	return engine;
}

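/*
 * When I915_EXEC_FENCE_ARRAY is set, cliprects_ptr points at an array of
 * drm_i915_gem_exec_fence entries. Each syncobj is looked up once and its
 * WAIT/SIGNAL flags are packed into the low bits of the pointer for the
 * duration of the execbuf.
 */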
static void
__free_fence_array(struct drm_syncobj **fences, unsigned int n)
{
	while (n--)
		drm_syncobj_put(ptr_mask_bits(fences[n], 2));
	kvfree(fences);
}

static struct drm_syncobj **
get_fence_array(struct drm_i915_gem_execbuffer2 *args,
		struct drm_file *file)
{
	const unsigned long nfences = args->num_cliprects;
	struct drm_i915_gem_exec_fence __user *user;
	struct drm_syncobj **fences;
	unsigned long n;
	int err;

	if (!(args->flags & I915_EXEC_FENCE_ARRAY))
		return NULL;

	/* Check multiplication overflow for access_ok() and kvmalloc_array() */
	BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
	if (nfences > min_t(unsigned long,
			    ULONG_MAX / sizeof(*user),
			    SIZE_MAX / sizeof(*fences)))
		return ERR_PTR(-EINVAL);

	user = u64_to_user_ptr(args->cliprects_ptr);
	if (!access_ok(VERIFY_READ, user, nfences * sizeof(*user)))
		return ERR_PTR(-EFAULT);

	fences = kvmalloc_array(nfences, sizeof(*fences),
				__GFP_NOWARN | GFP_KERNEL);
	if (!fences)
		return ERR_PTR(-ENOMEM);

	for (n = 0; n < nfences; n++) {
		struct drm_i915_gem_exec_fence fence;
		struct drm_syncobj *syncobj;

		if (__copy_from_user(&fence, user++, sizeof(fence))) {
			err = -EFAULT;
			goto err;
		}

		if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
			err = -EINVAL;
			goto err;
		}

		syncobj = drm_syncobj_find(file, fence.handle);
		if (!syncobj) {
			DRM_DEBUG("Invalid syncobj handle provided\n");
			err = -ENOENT;
			goto err;
		}

		BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
			     ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);

		fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
	}

	return fences;

err:
	__free_fence_array(fences, n);
	return ERR_PTR(err);
}

static void
put_fence_array(struct drm_i915_gem_execbuffer2 *args,
		struct drm_syncobj **fences)
{
	if (fences)
		__free_fence_array(fences, args->num_cliprects);
}

static int
await_fence_array(struct i915_execbuffer *eb,
		  struct drm_syncobj **fences)
{
	const unsigned int nfences = eb->args->num_cliprects;
	unsigned int n;
	int err;

	for (n = 0; n < nfences; n++) {
		struct drm_syncobj *syncobj;
		struct dma_fence *fence;
		unsigned int flags;

		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
		if (!(flags & I915_EXEC_FENCE_WAIT))
			continue;

		fence = drm_syncobj_fence_get(syncobj);
		if (!fence)
			return -EINVAL;

		err = i915_request_await_dma_fence(eb->request, fence);
		dma_fence_put(fence);
		if (err < 0)
			return err;
	}

	return 0;
}

static void
signal_fence_array(struct i915_execbuffer *eb,
		   struct drm_syncobj **fences)
{
	const unsigned int nfences = eb->args->num_cliprects;
	struct dma_fence * const fence = &eb->request->fence;
	unsigned int n;

	for (n = 0; n < nfences; n++) {
		struct drm_syncobj *syncobj;
		unsigned int flags;

		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
		if (!(flags & I915_EXEC_FENCE_SIGNAL))
			continue;

		drm_syncobj_replace_fence(syncobj, fence);
	}
}

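/*
 * Common execbuffer path for both ioctls: validate the arguments, look up
 * and relocate every object, optionally run the batch through the command
 * parser, then build the request with its in/out fences and submit it.
 */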
static int
i915_gem_do_execbuffer(struct drm_device *dev,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec,
		       struct drm_syncobj **fences)
{
	struct i915_execbuffer eb;
	struct dma_fence *in_fence = NULL;
	struct sync_file *out_fence = NULL;
	int out_fence_fd = -1;
	int err;

	BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS);
	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);

	eb.i915 = to_i915(dev);
	eb.file = file;
	eb.args = args;
	if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
		args->flags |= __EXEC_HAS_RELOC;

	eb.exec = exec;
	eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1);
	eb.vma[0] = NULL;
	eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1);

	eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	if (USES_FULL_PPGTT(eb.i915))
		eb.invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
	reloc_cache_init(&eb.reloc_cache, eb.i915);

	eb.buffer_count = args->buffer_count;
	eb.batch_start_offset = args->batch_start_offset;
	eb.batch_len = args->batch_len;

	eb.batch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		eb.batch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		eb.batch_flags |= I915_DISPATCH_PINNED;

	eb.engine = eb_select_engine(eb.i915, file, args);
	if (!eb.engine)
		return -EINVAL;

	if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
		if (!HAS_RESOURCE_STREAMER(eb.i915)) {
			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
			return -EINVAL;
		}
		if (eb.engine->id != RCS) {
			DRM_DEBUG("RS is not available on %s\n",
				 eb.engine->name);
			return -EINVAL;
		}

		eb.batch_flags |= I915_DISPATCH_RS;
	}

	if (args->flags & I915_EXEC_FENCE_IN) {
		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!in_fence)
			return -EINVAL;
	}

	if (args->flags & I915_EXEC_FENCE_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			err = out_fence_fd;
			goto err_in_fence;
		}
	}

	err = eb_create(&eb);
	if (err)
		goto err_out_fence;

	GEM_BUG_ON(!eb.lut_size);

	err = eb_select_context(&eb);
	if (unlikely(err))
		goto err_destroy;

	/*
	 * Take a local wakeref for preparing to dispatch the execbuf as
	 * we expect to access the hardware fairly frequently in the
	 * process. Upon first dispatch, we acquire another prolonged
	 * wakeref that we hold until the GPU has been idle for at least
	 * 100ms.
	 */
	intel_runtime_pm_get(eb.i915);

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto err_rpm;

	err = eb_relocate(&eb);
	if (err) {
		/*
		 * If the user expects the execobject.offset and
		 * reloc.presumed_offset to be an exact match,
		 * as for using NO_RELOC, then we cannot update
		 * the execobject.offset until we have completed
		 * relocation.
		 */
		args->flags &= ~__EXEC_HAS_RELOC;
		goto err_vma;
	}

	if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		err = -EINVAL;
		goto err_vma;
	}
	if (eb.batch_start_offset > eb.batch->size ||
	    eb.batch_len > eb.batch->size - eb.batch_start_offset) {
		DRM_DEBUG("Attempting to use out-of-bounds batch\n");
		err = -EINVAL;
		goto err_vma;
	}

	if (eb_use_cmdparser(&eb)) {
		struct i915_vma *vma;

		vma = eb_parse(&eb, drm_is_current_master(file));
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_vma;
		}

		if (vma) {
			/*
			 * Batch parsed and accepted:
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
			 * bit from MI_BATCH_BUFFER_START commands issued in
			 * the dispatch_execbuffer implementations. We
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
			eb.batch_flags |= I915_DISPATCH_SECURE;
			eb.batch_start_offset = 0;
			eb.batch = vma;
		}
	}

	if (eb.batch_len == 0)
		eb.batch_len = eb.batch->size - eb.batch_start_offset;

	/*
	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (eb.batch_flags & I915_DISPATCH_SECURE) {
		struct i915_vma *vma;

		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_vma;
		}

		eb.batch = vma;
	}

	/* All GPU relocation batches must be submitted prior to the user rq */
	GEM_BUG_ON(eb.reloc_cache.rq);

	/* Allocate a request for this batch buffer nice and early. */
	eb.request = i915_request_alloc(eb.engine, eb.ctx);
	if (IS_ERR(eb.request)) {
		err = PTR_ERR(eb.request);
		goto err_batch_unpin;
	}

	if (in_fence) {
		err = i915_request_await_dma_fence(eb.request, in_fence);
		if (err < 0)
			goto err_request;
	}

	if (fences) {
		err = await_fence_array(&eb, fences);
		if (err)
			goto err_request;
	}

	if (out_fence_fd != -1) {
		out_fence = sync_file_create(&eb.request->fence);
		if (!out_fence) {
			err = -ENOMEM;
			goto err_request;
		}
	}

	/*
	 * Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	eb.request->batch = eb.batch;

	trace_i915_request_queue(eb.request, eb.batch_flags);
	err = eb_submit(&eb);
err_request:
	i915_request_add(eb.request);
	add_to_client(eb.request, file);

	if (fences)
		signal_fence_array(&eb, fences);

	if (out_fence) {
		if (err == 0) {
			fd_install(out_fence_fd, out_fence->file);
			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
			args->rsvd2 |= (u64)out_fence_fd << 32;
			out_fence_fd = -1;
		} else {
			fput(out_fence->file);
		}
	}

err_batch_unpin:
	if (eb.batch_flags & I915_DISPATCH_SECURE)
		i915_vma_unpin(eb.batch);
err_vma:
	if (eb.exec)
		eb_release_vmas(&eb);
	mutex_unlock(&dev->struct_mutex);
err_rpm:
	intel_runtime_pm_put(eb.i915);
	i915_gem_context_put(eb.ctx);
err_destroy:
	eb_destroy(&eb);
err_out_fence:
	if (out_fence_fd != -1)
		put_unused_fd(out_fence_fd);
err_in_fence:
	dma_fence_put(in_fence);
	return err;
}

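/*
 * Each exec entry is allocated alongside a vma pointer and a flags slot,
 * so the user array, vma array and flags array can share one allocation.
 */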
static size_t eb_element_size(void)
{
	return (sizeof(struct drm_i915_gem_exec_object2) +
		sizeof(struct i915_vma *) +
		sizeof(unsigned int));
}

static bool check_buffer_count(size_t count)
{
	const size_t sz = eb_element_size();

	/*
	 * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup
	 * array size (see eb_create()). Otherwise, we can accept an array as
	 * large as can be addressed (though use large arrays at your peril)!
	 */

	return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1);
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	const size_t count = args->buffer_count;
	unsigned int i;
	int err;

	if (!check_buffer_count(count)) {
		DRM_DEBUG("execbuf2 with %zd buffers\n", count);
		return -EINVAL;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	if (!i915_gem_check_execbuffer(&exec2))
		return -EINVAL;

	/* Copy in the exec list from userland */
	exec_list = kvmalloc_array(count, sizeof(*exec_list),
				   __GFP_NOWARN | GFP_KERNEL);
	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -ENOMEM;
	}
	err = copy_from_user(exec_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * count);
	if (err) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, err);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_GEN(to_i915(dev)) < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list, NULL);
	if (exec2.flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
			exec2_list[i].offset &= PIN_OFFSET_MASK;
			if (__copy_to_user(&user_exec_list[i].offset,
					   &exec2_list[i].offset,
					   sizeof(user_exec_list[i].offset)))
				break;
		}
	}

	kvfree(exec_list);
	kvfree(exec2_list);
	return err;
}

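/*
 * execbuffer2 ioctl: copy in the exec object list (plus one spare slot for
 * the command parser), collect any fence array, run the common path and
 * then write the updated offsets back to userspace.
 */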
int
i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list;
	struct drm_syncobj **fences = NULL;
	const size_t count = args->buffer_count;
	int err;

	if (!check_buffer_count(count)) {
		DRM_DEBUG("execbuf2 with %zd buffers\n", count);
		return -EINVAL;
	}

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	/* Allocate an extra slot for use by the command parser */
	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %zd buffers\n",
			  count);
		return -ENOMEM;
	}
	if (copy_from_user(exec2_list,
			   u64_to_user_ptr(args->buffers_ptr),
			   sizeof(*exec2_list) * count)) {
		DRM_DEBUG("copy %zd exec entries failed\n", count);
		kvfree(exec2_list);
		return -EFAULT;
	}

	if (args->flags & I915_EXEC_FENCE_ARRAY) {
		fences = get_fence_array(args, file);
		if (IS_ERR(fences)) {
			kvfree(exec2_list);
			return PTR_ERR(fences);
		}
	}

	err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences);

	/*
	 * Now that we have begun execution of the batchbuffer, we ignore
	 * any new error after this point. Also given that we have already
	 * updated the associated relocations, we try to write out the current
	 * object locations irrespective of any error.
	 */
	if (args->flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);
		unsigned int i;

		/* Copy the new buffer offsets back to the user's exec list. */
		user_access_begin();
		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
			unsafe_put_user(exec2_list[i].offset,
					&user_exec_list[i].offset,
					end_user);
		}
end_user:
		user_access_end();
	}

	args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
	put_fence_array(args, fences);
	kvfree(exec2_list);
	return err;
}