/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <linux/dma_remapping.h>
#include <linux/reservation.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drmP.h>
#include <drm/drm_syncobj.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

enum {
	FORCE_CPU_RELOC = 1,
	FORCE_GTT_RELOC,
	FORCE_GPU_RELOC,
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
};

#define __EXEC_OBJECT_HAS_REF		BIT(31)
#define __EXEC_OBJECT_HAS_PIN		BIT(30)
#define __EXEC_OBJECT_HAS_FENCE		BIT(29)
#define __EXEC_OBJECT_NEEDS_MAP		BIT(28)
#define __EXEC_OBJECT_NEEDS_BIAS	BIT(27)
#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 27) /* all of the above */
#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)

#define __EXEC_HAS_RELOC	BIT(31)
#define __EXEC_VALIDATED	BIT(30)
#define __EXEC_INTERNAL_FLAGS	(~0u << 30)
#define UPDATE			PIN_OFFSET_FIXED

#define BATCH_OFFSET_BIAS (256*1024)

#define __I915_EXEC_ILLEGAL_FLAGS \
	(__I915_EXEC_UNKNOWN_FLAGS | \
	 I915_EXEC_CONSTANTS_MASK  | \
	 I915_EXEC_RESOURCE_STREAMER)

/* Catch emission of unexpected errors for CI! */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#undef EINVAL
#define EINVAL ({ \
	DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
	22; \
})
#endif

/**
 * DOC: User command execution
 *
 * Userspace submits commands to be executed on the GPU as an instruction
 * stream within a GEM object we call a batchbuffer. These instructions may
 * refer to other GEM objects containing auxiliary state such as kernels,
 * samplers, render targets and even secondary batchbuffers. Userspace does
 * not know where in the GPU memory these objects reside and so before the
 * batchbuffer is passed to the GPU for execution, those addresses in the
 * batchbuffer and auxiliary objects are updated. This is known as relocation,
 * or patching. To try and avoid having to relocate each object on the next
 * execution, userspace is told the location of those objects in this pass,
 * but this remains just a hint as the kernel may choose a new location for
 * any object in the future.
 *
 * At the level of talking to the hardware, submitting a batchbuffer for the
 * GPU to execute amounts to adding content to a buffer from which the HW
 * command streamer is reading.
 *
 * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e.
 *    Execlists, this command is not placed on the same buffer as the
 *    remaining items.
 *
 * 2. Add a command to invalidate caches to the buffer.
 *
 * 3. Add a batchbuffer start command to the buffer; the start command is
 *    essentially a token together with the GPU address of the batchbuffer
 *    to be executed.
 *
 * 4. Add a pipeline flush to the buffer.
 *
 * 5. Add a memory write command to the buffer to record when the GPU
 *    is done executing the batchbuffer. The memory write writes the
 *    global sequence number of the request, ``i915_request::global_seqno``;
 *    the i915 driver uses the current value in the register to determine
 *    if the GPU has completed the batchbuffer.
 *
 * 6. Add a user interrupt command to the buffer. This command instructs
 *    the GPU to issue an interrupt when the command, pipeline flush and
 *    memory write are completed.
 *
 * 7. Inform the hardware of the additional commands added to the buffer
 *    (by updating the tail pointer).
 *
 * Processing an execbuf ioctl is conceptually split up into a few phases.
 *
 * 1. Validation - Ensure all the pointers, handles and flags are valid.
 * 2. Reservation - Assign GPU address space for every object
 * 3. Relocation - Update any addresses to point to the final locations
 * 4. Serialisation - Order the request with respect to its dependencies
 * 5. Construction - Construct a request to execute the batchbuffer
 * 6. Submission (at some point in the future execution)
 *
 * Reserving resources for the execbuf is the most complicated phase. We
 * neither want to have to migrate the object in the address space, nor do
 * we want to have to update any relocations pointing to this object. Ideally,
 * we want to leave the object where it is and for all the existing relocations
 * to match. If the object is given a new address, or if userspace thinks the
 * object is elsewhere, we have to parse all the relocation entries and update
 * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
 * all the target addresses in all of its objects match the value in the
 * relocation entries and that they all match the presumed offsets given by the
 * list of execbuffer objects. Using this knowledge, we know that if we haven't
 * moved any buffers, all the relocation entries are valid and we can skip
 * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
 * hang.) The requirements for using I915_EXEC_NO_RELOC are:
 *
 *      The addresses written in the objects must match the corresponding
 *      reloc.presumed_offset which in turn must match the corresponding
 *      execobject.offset.
 *
 *      Any render targets written to in the batch must be flagged with
 *      EXEC_OBJECT_WRITE.
 *
 *      To avoid stalling, execobject.offset should match the current
 *      address of that object within the active context.
 *
 * The reservation is done in multiple phases. First we try and keep any
 * object already bound in its current location - so long as it meets the
 * constraints imposed by the new execbuffer. Any object left unbound after the
 * first pass is then fitted into any available idle space. If an object does
 * not fit, all objects are removed from the reservation and the process rerun
 * after sorting the objects into a priority order (more difficult to fit
 * objects are tried first). Failing that, the entire VM is cleared and we try
 * to fit the execbuf one last time before concluding that it simply will not
 * fit.
 *
 * A small complication to all of this is that we allow userspace not only to
 * specify an alignment and a size for the object in the address space, but
 * we also allow userspace to specify the exact offset. These objects are
 * simpler to place (the location is known a priori): all we have to do is make
 * sure the space is available.
 *
 * Once all the objects are in place, patching up the buried pointers to point
 * to the final locations is a fairly simple job of walking over the relocation
 * entry arrays, looking up the right address and rewriting the value into
 * the object. Simple! ... The relocation entries are stored in user memory
 * and so to access them we have to copy them into a local buffer. That copy
 * has to avoid taking any pagefaults as they may lead back to a GEM object
 * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
 * the relocation into multiple passes. First we try to do everything within an
 * atomic context (avoid the pagefaults) which requires that we never wait. If
 * we detect that we may wait, or if we need to fault, then we have to fall back
 * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
 * bells yet?) Dropping the mutex means that we lose all the state we have
 * built up so far for the execbuf and we must reset any global data. However,
 * we do leave the objects pinned in their final locations - which is a
 * potential issue for concurrent execbufs. Once we have left the mutex, we can
 * allocate and copy all the relocation entries into a large array at our
 * leisure, reacquire the mutex, reclaim all the objects and other state and
 * then proceed to update any incorrect addresses with the objects.
 *
 * As we process the relocation entries, we maintain a record of whether the
 * object is being written to. Using NORELOC, we expect userspace to provide
 * this information instead. We also check whether we can skip the relocation
 * by comparing the expected value inside the relocation entry with the target's
 * final address. If they differ, we have to map the current object and rewrite
 * the 4 or 8 byte pointer within.
 *
 * Serialising an execbuf is quite simple according to the rules of the GEM
 * ABI. Execution within each context is ordered by the order of submission.
 * Writes to any GEM object are in order of submission and are exclusive. Reads
 * from a GEM object are unordered with respect to other reads, but ordered by
 * writes. A write submitted after a read cannot occur before the read, and
 * similarly any read submitted after a write cannot occur before the write.
 * Writes are ordered between engines such that only one write occurs at any
 * time (completing any reads beforehand) - using semaphores where available
 * and CPU serialisation otherwise. Other GEM accesses obey the same rules: any
 * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
 * reads before starting, and any read (either using set-domain or pread) must
 * flush all GPU writes before starting. (Note we only employ a barrier before,
 * we currently rely on userspace not concurrently starting a new execution
 * whilst reading or writing to an object. This may be an advantage or not
 * depending on how much you trust userspace not to shoot themselves in the
 * foot.) Serialisation may just result in the request being inserted into
 * a DAG awaiting its turn, but the simplest is to wait on the CPU until
 * all dependencies are resolved.
 *
 * After all of that, it is just a matter of closing the request and handing it to
 * the hardware (well, leaving it in a queue to be executed). However, we also
 * offer the ability for batchbuffers to be run with elevated privileges so
 * that they can access otherwise hidden registers. (Used to adjust L3 cache etc.)
 * Before any batch is given extra privileges we first must check that it
 * contains no nefarious instructions: we check that each instruction is from
 * our whitelist and that all registers are also from an allowed list. We first
 * copy the user's batchbuffer to a shadow (so that the user doesn't have
 * access to it, either by the CPU or GPU as we scan it) and then parse each
 * instruction. If everything is ok, we set a flag telling the hardware to run
 * the batchbuffer in trusted mode, otherwise the ioctl is rejected.
 */
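
/*
 * Illustrative sketch only (not driver code): a minimal userspace
 * submission using the NO_RELOC fast path described above. The handles,
 * offsets and engine selection are assumed values for the example.
 *
 *	struct drm_i915_gem_exec_object2 obj[2] = {};
 *	struct drm_i915_gem_execbuffer2 execbuf = {};
 *
 *	obj[0].handle = target_handle;
 *	obj[0].offset = presumed_target_offset;	// must match the addresses
 *						// already written in the batch
 *	obj[0].flags = EXEC_OBJECT_WRITE;	// written by the GPU
 *	obj[1].handle = batch_handle;		// batch is last by default
 *	obj[1].offset = presumed_batch_offset;
 *
 *	execbuf.buffers_ptr = (uintptr_t)obj;
 *	execbuf.buffer_count = 2;
 *	execbuf.batch_len = batch_len;
 *	execbuf.flags = I915_EXEC_RENDER |
 *			I915_EXEC_NO_RELOC |	// execobj.offset is valid
 *			I915_EXEC_HANDLE_LUT;	// reloc handles are indices
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *
 * If the kernel has to move an object, the new offset is written back
 * into obj[] and must be used as the presumed offset next time.
 */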

struct i915_execbuffer {
	struct drm_i915_private *i915; /** i915 backpointer */
	struct drm_file *file; /** per-file lookup tables and limits */
	struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
	struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
	struct i915_vma **vma;
	unsigned int *flags;

	struct intel_engine_cs *engine; /** engine to queue the request to */
	struct i915_gem_context *ctx; /** context for building the request */
	struct i915_address_space *vm; /** GTT and vma for the request */

	struct i915_request *request; /** our request to build */
	struct i915_vma *batch; /** identity of the batch obj/vma */

	/** actual size of execobj[] as we may extend it for the cmdparser */
	unsigned int buffer_count;

	/** list of vma not yet bound during reservation phase */
	struct list_head unbound;

	/** list of vma that have execobj.relocation_count */
	struct list_head relocs;

	/**
	 * Track the most recently used object for relocations, as we
	 * frequently have to perform multiple relocations within the same
	 * obj/page
	 */
	struct reloc_cache {
		struct drm_mm_node node; /** temporary GTT binding */
		unsigned long vaddr; /** Current kmap address */
		unsigned long page; /** Currently mapped page index */
		unsigned int gen; /** Cached value of INTEL_GEN */
		bool use_64bit_reloc : 1;
		bool has_llc : 1;
		bool has_fence : 1;
		bool needs_unfenced : 1;

		struct i915_request *rq;
		u32 *rq_cmd;
		unsigned int rq_size;
	} reloc_cache;

	u64 invalid_flags; /** Set of execobj.flags that are invalid */
	u32 context_flags; /** Set of execobj.flags to insert from the ctx */

	u32 batch_start_offset; /** Location within object of batch */
	u32 batch_len; /** Length of batch within object */
	u32 batch_flags; /** Flags composed for emit_bb_start() */

	/**
	 * Indicate either the size of the hashtable used to resolve
	 * relocation handles, or if negative that we are using a direct
	 * index into the execobj[].
	 */
	int lut_size;
	struct hlist_head *buckets; /** ht for relocation handles */
};

#define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags])

/*
 * Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
#define GEN8_HIGH_ADDRESS_BIT 47
static inline u64 gen8_canonical_addr(u64 address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline u64 gen8_noncanonical_addr(u64 address)
{
	return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
}
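
/*
 * For example, an offset with bit 47 set sign-extends into the top
 * sixteen bits:
 *	gen8_canonical_addr(0x0000800000000000ull) == 0xffff800000000000ull
 * while gen8_noncanonical_addr() of that result recovers the original
 * 48b offset used internally, where drm_mm sees a contiguous space.
 */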

static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
	return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len;
}

static int eb_create(struct i915_execbuffer *eb)
{
	if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
		unsigned int size = 1 + ilog2(eb->buffer_count);

		/*
		 * Without a 1:1 association between relocation handles and
		 * the execobject[] index, we instead create a hashtable.
		 * We size it dynamically based on available memory, starting
		 * first with a 1:1 associative hash and scaling back until
		 * the allocation succeeds.
		 *
		 * Later on we use a positive lut_size to indicate we are
		 * using this hashtable, and a negative value to indicate a
		 * direct lookup.
		 */
		do {
			gfp_t flags;

			/* While we can still reduce the allocation size, don't
			 * raise a warning and allow the allocation to fail.
			 * On the last pass though, we want to try as hard
			 * as possible to perform the allocation and warn
			 * if it fails.
			 */
			flags = GFP_KERNEL;
			if (size > 1)
				flags |= __GFP_NORETRY | __GFP_NOWARN;

			eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
					      flags);
			if (eb->buckets)
				break;
		} while (--size);

		if (unlikely(!size))
			return -ENOMEM;

		eb->lut_size = size;
	} else {
		eb->lut_size = -eb->buffer_count;
	}

	return 0;
}
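
/*
 * Example (illustrative numbers): for buffer_count == 1000, size starts
 * at 1 + ilog2(1000) == 10, i.e. 1024 buckets. Each failed kzalloc()
 * halves the table; the final attempt at size == 1 retries without
 * __GFP_NORETRY, and only if that too fails do we return -ENOMEM.
 */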

static bool
eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
		 const struct i915_vma *vma,
		 unsigned int flags)
{
	if (vma->node.size < entry->pad_to_size)
		return true;

	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
		return true;

	if (flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	if (flags & __EXEC_OBJECT_NEEDS_MAP &&
	    !i915_vma_is_map_and_fenceable(vma))
		return true;

	return false;
}

static inline bool
eb_pin_vma(struct i915_execbuffer *eb,
	   const struct drm_i915_gem_exec_object2 *entry,
	   struct i915_vma *vma)
{
	unsigned int exec_flags = *vma->exec_flags;
	u64 pin_flags;

	if (vma->node.size)
		pin_flags = vma->node.start;
	else
		pin_flags = entry->offset & PIN_OFFSET_MASK;

	pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_GTT))
		pin_flags |= PIN_GLOBAL;

	if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
		return false;

	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
		if (unlikely(i915_vma_pin_fence(vma))) {
			i915_vma_unpin(vma);
			return false;
		}

		if (vma->fence)
			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
	return !eb_vma_misplaced(entry, vma, exec_flags);
}

static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
{
	GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));

	if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
		__i915_vma_unpin_fence(vma);

	__i915_vma_unpin(vma);
}

static inline void
eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags)
{
	if (!(*flags & __EXEC_OBJECT_HAS_PIN))
		return;

	__eb_unreserve_vma(vma, *flags);
	*flags &= ~__EXEC_OBJECT_RESERVED;
}

static int
eb_validate_vma(struct i915_execbuffer *eb,
		struct drm_i915_gem_exec_object2 *entry,
		struct i915_vma *vma)
{
	if (unlikely(entry->flags & eb->invalid_flags))
		return -EINVAL;

	if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
		return -EINVAL;

	/*
	 * Offset can be used as input (EXEC_OBJECT_PINNED), reject
	 * any non-page-aligned or non-canonical addresses.
	 */
	if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
		     entry->offset != gen8_canonical_addr(entry->offset & PAGE_MASK)))
		return -EINVAL;

	/* pad_to_size was once a reserved field, so sanitize it */
	if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
		if (unlikely(offset_in_page(entry->pad_to_size)))
			return -EINVAL;
	} else {
		entry->pad_to_size = 0;
	}

	if (unlikely(vma->exec_flags)) {
		DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n",
			  entry->handle, (int)(entry - eb->exec));
		return -EINVAL;
	}

	/*
	 * From drm_mm perspective address space is continuous,
	 * so from this point we're always using non-canonical
	 * form internally.
	 */
	entry->offset = gen8_noncanonical_addr(entry->offset);

	if (!eb->reloc_cache.has_fence) {
		entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
	} else {
		if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
		     eb->reloc_cache.needs_unfenced) &&
		    i915_gem_object_is_tiled(vma->obj))
			entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
	}

	if (!(entry->flags & EXEC_OBJECT_PINNED))
		entry->flags |= eb->context_flags;

	return 0;
}

static int
eb_add_vma(struct i915_execbuffer *eb,
	   unsigned int i, unsigned batch_idx,
	   struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
	int err;

	GEM_BUG_ON(i915_vma_is_closed(vma));

	if (!(eb->args->flags & __EXEC_VALIDATED)) {
		err = eb_validate_vma(eb, entry, vma);
		if (unlikely(err))
			return err;
	}

	if (eb->lut_size > 0) {
		vma->exec_handle = entry->handle;
		hlist_add_head(&vma->exec_node,
			       &eb->buckets[hash_32(entry->handle,
						    eb->lut_size)]);
	}

	if (entry->relocation_count)
		list_add_tail(&vma->reloc_link, &eb->relocs);

	/*
	 * Stash a pointer from the vma to execobj, so we can query its flags,
	 * size, alignment etc as provided by the user. Also we stash a pointer
	 * to the vma inside the execobj so that we can use a direct lookup
	 * to find the right target VMA when doing relocations.
	 */
	eb->vma[i] = vma;
	eb->flags[i] = entry->flags;
	vma->exec_flags = &eb->flags[i];

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if (i == batch_idx) {
		if (entry->relocation_count &&
		    !(eb->flags[i] & EXEC_OBJECT_PINNED))
			eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
		if (eb->reloc_cache.has_fence)
			eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;

		eb->batch = vma;
	}

	err = 0;
	if (eb_pin_vma(eb, entry, vma)) {
		if (entry->offset != vma->node.start) {
			entry->offset = vma->node.start | UPDATE;
			eb->args->flags |= __EXEC_HAS_RELOC;
		}
	} else {
		eb_unreserve_vma(vma, vma->exec_flags);

		list_add_tail(&vma->exec_link, &eb->unbound);
		if (drm_mm_node_allocated(&vma->node))
			err = i915_vma_unbind(vma);
		if (unlikely(err))
			vma->exec_flags = NULL;
	}
	return err;
}

static inline int use_cpu_reloc(const struct reloc_cache *cache,
				const struct drm_i915_gem_object *obj)
{
	if (!i915_gem_object_has_struct_page(obj))
		return false;

	if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
		return true;

	if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
		return false;

	return (cache->has_llc ||
		obj->cache_dirty ||
		obj->cache_level != I915_CACHE_NONE);
}

static int eb_reserve_vma(const struct i915_execbuffer *eb,
			  struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
	unsigned int exec_flags = *vma->exec_flags;
	u64 pin_flags;
	int err;

	pin_flags = PIN_USER | PIN_NONBLOCK;
	if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
		pin_flags |= PIN_GLOBAL;

	/*
	 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
	 * limit address to the first 4GBs for unflagged objects.
	 */
	if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
		pin_flags |= PIN_ZONE_4G;

	if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
		pin_flags |= PIN_MAPPABLE;

	if (exec_flags & EXEC_OBJECT_PINNED) {
		pin_flags |= entry->offset | PIN_OFFSET_FIXED;
		pin_flags &= ~PIN_NONBLOCK; /* force overlapping checks */
	} else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) {
		pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
	}

	err = i915_vma_pin(vma,
			   entry->pad_to_size, entry->alignment,
			   pin_flags);
	if (err)
		return err;

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start | UPDATE;
		eb->args->flags |= __EXEC_HAS_RELOC;
	}

	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
		err = i915_vma_pin_fence(vma);
		if (unlikely(err)) {
			i915_vma_unpin(vma);
			return err;
		}

		if (vma->fence)
			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
	GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags));

	return 0;
}

static int eb_reserve(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	struct list_head last;
	struct i915_vma *vma;
	unsigned int i, pass;
	int err;

	/*
	 * Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */

	pass = 0;
	err = 0;
	do {
		list_for_each_entry(vma, &eb->unbound, exec_link) {
			err = eb_reserve_vma(eb, vma);
			if (err)
				break;
		}
		if (err != -ENOSPC)
			return err;

		/* Resort *all* the objects into priority order */
		INIT_LIST_HEAD(&eb->unbound);
		INIT_LIST_HEAD(&last);
		for (i = 0; i < count; i++) {
			unsigned int flags = eb->flags[i];
			struct i915_vma *vma = eb->vma[i];

			if (flags & EXEC_OBJECT_PINNED &&
			    flags & __EXEC_OBJECT_HAS_PIN)
				continue;

			eb_unreserve_vma(vma, &eb->flags[i]);

			if (flags & EXEC_OBJECT_PINNED)
				list_add(&vma->exec_link, &eb->unbound);
			else if (flags & __EXEC_OBJECT_NEEDS_MAP)
				list_add_tail(&vma->exec_link, &eb->unbound);
			else
				list_add_tail(&vma->exec_link, &last);
		}
		list_splice_tail(&last, &eb->unbound);

		switch (pass++) {
		case 0:
			break;

		case 1:
			/* Too fragmented, unbind everything and retry */
			err = i915_gem_evict_vm(eb->vm);
			if (err)
				return err;
			break;

		default:
			return -ENOSPC;
		}
	} while (1);
}

static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
{
	if (eb->args->flags & I915_EXEC_BATCH_FIRST)
		return 0;
	else
		return eb->buffer_count - 1;
}

static int eb_select_context(struct i915_execbuffer *eb)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
	if (unlikely(!ctx))
		return -ENOENT;

	eb->ctx = ctx;
	if (ctx->ppgtt) {
		eb->vm = &ctx->ppgtt->vm;
		eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
	} else {
		eb->vm = &eb->i915->ggtt.vm;
	}

	eb->context_flags = 0;
	if (ctx->flags & CONTEXT_NO_ZEROMAP)
		eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return 0;
}

static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
	struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
	struct drm_i915_gem_object *obj;
	unsigned int i, batch;
	int err;

	if (unlikely(i915_gem_context_is_closed(eb->ctx)))
		return -ENOENT;

	if (unlikely(i915_gem_context_is_banned(eb->ctx)))
		return -EIO;

	INIT_LIST_HEAD(&eb->relocs);
	INIT_LIST_HEAD(&eb->unbound);

	batch = eb_batch_index(eb);

	for (i = 0; i < eb->buffer_count; i++) {
		u32 handle = eb->exec[i].handle;
		struct i915_lut_handle *lut;
		struct i915_vma *vma;

		vma = radix_tree_lookup(handles_vma, handle);
		if (likely(vma))
			goto add_vma;

		obj = i915_gem_object_lookup(eb->file, handle);
		if (unlikely(!obj)) {
			err = -ENOENT;
			goto err_vma;
		}

		vma = i915_vma_instance(obj, eb->vm, NULL);
		if (unlikely(IS_ERR(vma))) {
			err = PTR_ERR(vma);
			goto err_obj;
		}

		lut = kmem_cache_alloc(eb->i915->luts, GFP_KERNEL);
		if (unlikely(!lut)) {
			err = -ENOMEM;
			goto err_obj;
		}

		err = radix_tree_insert(handles_vma, handle, vma);
		if (unlikely(err)) {
			kmem_cache_free(eb->i915->luts, lut);
			goto err_obj;
		}

		/* transfer ref to ctx */
		if (!vma->open_count++)
			i915_vma_reopen(vma);
		list_add(&lut->obj_link, &obj->lut_list);
		list_add(&lut->ctx_link, &eb->ctx->handles_list);
		lut->ctx = eb->ctx;
		lut->handle = handle;

add_vma:
		err = eb_add_vma(eb, i, batch, vma);
		if (unlikely(err))
			goto err_vma;

		GEM_BUG_ON(vma != eb->vma[i]);
		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
		GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
			   eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
	}

	eb->args->flags |= __EXEC_VALIDATED;
	return eb_reserve(eb);

err_obj:
	i915_gem_object_put(obj);
err_vma:
	eb->vma[i] = NULL;
	return err;
}

static struct i915_vma *
eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
{
	if (eb->lut_size < 0) {
		if (handle >= -eb->lut_size)
			return NULL;
		return eb->vma[handle];
	} else {
		struct hlist_head *head;
		struct i915_vma *vma;

		head = &eb->buckets[hash_32(handle, eb->lut_size)];
		hlist_for_each_entry(vma, head, exec_node) {
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void eb_release_vmas(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;

	for (i = 0; i < count; i++) {
		struct i915_vma *vma = eb->vma[i];
		unsigned int flags = eb->flags[i];

		if (!vma)
			break;

		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
		vma->exec_flags = NULL;
		eb->vma[i] = NULL;

		if (flags & __EXEC_OBJECT_HAS_PIN)
			__eb_unreserve_vma(vma, flags);

		if (flags & __EXEC_OBJECT_HAS_REF)
			i915_vma_put(vma);
	}
}

static void eb_reset_vmas(const struct i915_execbuffer *eb)
{
	eb_release_vmas(eb);
	if (eb->lut_size > 0)
		memset(eb->buckets, 0,
		       sizeof(struct hlist_head) << eb->lut_size);
}

static void eb_destroy(const struct i915_execbuffer *eb)
{
	GEM_BUG_ON(eb->reloc_cache.rq);

	if (eb->lut_size > 0)
		kfree(eb->buckets);
}

static inline u64
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
		  const struct i915_vma *target)
{
	return gen8_canonical_addr((int)reloc->delta + target->node.start);
}
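
/*
 * Example (illustrative): a target bound at 0x100000 with a relocation
 * delta of 0x40 patches to gen8_canonical_addr(0x100040). The (int)
 * cast sign-extends the u32 delta so userspace can encode negative
 * offsets from the target's start.
 */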

static void reloc_cache_init(struct reloc_cache *cache,
			     struct drm_i915_private *i915)
{
	cache->page = -1;
	cache->vaddr = 0;
	/* Must be a variable in the struct to allow GCC to unroll. */
	cache->gen = INTEL_GEN(i915);
	cache->has_llc = HAS_LLC(i915);
	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
	cache->has_fence = cache->gen < 4;
	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
	cache->node.allocated = false;
	cache->rq = NULL;
	cache->rq_size = 0;
}

static inline void *unmask_page(unsigned long p)
{
	return (void *)(uintptr_t)(p & PAGE_MASK);
}

static inline unsigned int unmask_flags(unsigned long p)
{
	return p & ~PAGE_MASK;
}

#define KMAP 0x4 /* after CLFLUSH_FLAGS */

static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
{
	struct drm_i915_private *i915 =
		container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
	return &i915->ggtt;
}

static void reloc_gpu_flush(struct reloc_cache *cache)
{
	GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
	cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(cache->rq->batch->obj);
	i915_gem_chipset_flush(cache->rq->i915);

	i915_request_add(cache->rq);
	cache->rq = NULL;
}

static void reloc_cache_reset(struct reloc_cache *cache)
{
	void *vaddr;

	if (cache->rq)
		reloc_gpu_flush(cache);

	if (!cache->vaddr)
		return;

	vaddr = unmask_page(cache->vaddr);
	if (cache->vaddr & KMAP) {
		if (cache->vaddr & CLFLUSH_AFTER)
			mb();

		kunmap_atomic(vaddr);
		i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
	} else {
		wmb();
		io_mapping_unmap_atomic((void __iomem *)vaddr);
		if (cache->node.allocated) {
			struct i915_ggtt *ggtt = cache_to_ggtt(cache);

			ggtt->vm.clear_range(&ggtt->vm,
					     cache->node.start,
					     cache->node.size);
			drm_mm_remove_node(&cache->node);
		} else {
			i915_vma_unpin((struct i915_vma *)cache->node.mm);
		}
	}

	cache->vaddr = 0;
	cache->page = -1;
}

static void *reloc_kmap(struct drm_i915_gem_object *obj,
			struct reloc_cache *cache,
			unsigned long page)
{
	void *vaddr;

	if (cache->vaddr) {
		kunmap_atomic(unmask_page(cache->vaddr));
	} else {
		unsigned int flushes;
		int err;

		err = i915_gem_obj_prepare_shmem_write(obj, &flushes);
		if (err)
			return ERR_PTR(err);

		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);

		cache->vaddr = flushes | KMAP;
		cache->node.mm = (void *)obj;
		if (flushes)
			mb();
	}

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
	cache->page = page;

	return vaddr;
}

static void *reloc_iomap(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 unsigned long page)
{
	struct i915_ggtt *ggtt = cache_to_ggtt(cache);
	unsigned long offset;
	void *vaddr;

	if (cache->vaddr) {
		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
	} else {
		struct i915_vma *vma;
		int err;

		if (use_cpu_reloc(cache, obj))
			return NULL;

		err = i915_gem_object_set_to_gtt_domain(obj, true);
		if (err)
			return ERR_PTR(err);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE |
					       PIN_NONBLOCK |
					       PIN_NONFAULT);
		if (IS_ERR(vma)) {
			memset(&cache->node, 0, sizeof(cache->node));
			err = drm_mm_insert_node_in_range
				(&ggtt->vm.mm, &cache->node,
				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
				 0, ggtt->mappable_end,
				 DRM_MM_INSERT_LOW);
			if (err) /* no inactive aperture space, use cpu reloc */
				return NULL;
		} else {
			err = i915_vma_put_fence(vma);
			if (err) {
				i915_vma_unpin(vma);
				return ERR_PTR(err);
			}

			cache->node.start = vma->node.start;
			cache->node.mm = (void *)vma;
		}
	}

	offset = cache->node.start;
	if (cache->node.allocated) {
		wmb();
		ggtt->vm.insert_page(&ggtt->vm,
				     i915_gem_object_get_dma_address(obj, page),
				     offset, I915_CACHE_NONE, 0);
	} else {
		offset += page << PAGE_SHIFT;
	}

	vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
							 offset);
	cache->page = page;
	cache->vaddr = (unsigned long)vaddr;

	return vaddr;
}

static void *reloc_vaddr(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 unsigned long page)
{
	void *vaddr;

	if (cache->page == page) {
		vaddr = unmask_page(cache->vaddr);
	} else {
		vaddr = NULL;
		if ((cache->vaddr & KMAP) == 0)
			vaddr = reloc_iomap(obj, cache, page);
		if (!vaddr)
			vaddr = reloc_kmap(obj, cache, page);
	}

	return vaddr;
}

static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
{
	if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
		if (flushes & CLFLUSH_BEFORE) {
			clflushopt(addr);
			mb();
		}

		*addr = value;

		/*
		 * Writes to the same cacheline are serialised by the CPU
		 * (including clflush). On the write path, we only require
		 * that it hits memory in an orderly fashion and place
		 * mb barriers at the start and end of the relocation phase
		 * to ensure ordering of clflush wrt the system.
		 */
		if (flushes & CLFLUSH_AFTER)
			clflushopt(addr);
	} else
		*addr = value;
}

static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
			     struct i915_vma *vma,
			     unsigned int len)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	struct drm_i915_gem_object *obj;
	struct i915_request *rq;
	struct i915_vma *batch;
	u32 *cmd;
	int err;

	GEM_BUG_ON(vma->obj->write_domain & I915_GEM_DOMAIN_CPU);

	obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map(obj,
				      cache->has_llc ?
				      I915_MAP_FORCE_WB :
				      I915_MAP_FORCE_WC);
	i915_gem_object_unpin_pages(obj);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	err = i915_gem_object_set_to_wc_domain(obj, false);
	if (err)
		goto err_unmap;

	batch = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_unmap;
	}

	err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
	if (err)
		goto err_unmap;

	rq = i915_request_alloc(eb->engine, eb->ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = i915_request_await_object(rq, vma->obj, true);
	if (err)
		goto err_request;

	err = eb->engine->emit_bb_start(rq,
					batch->node.start, PAGE_SIZE,
					cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
	if (err)
		goto err_request;

	GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
	err = i915_vma_move_to_active(batch, rq, 0);
	if (err)
		goto skip_request;

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		goto skip_request;

	rq->batch = batch;
	i915_vma_unpin(batch);

	cache->rq = rq;
	cache->rq_cmd = cmd;
	cache->rq_size = 0;

	/* Return with batch mapping (cmd) still pinned */
	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(batch);
err_unmap:
	i915_gem_object_unpin_map(obj);
	return err;
}

static u32 *reloc_gpu(struct i915_execbuffer *eb,
		      struct i915_vma *vma,
		      unsigned int len)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	u32 *cmd;

	if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1))
		reloc_gpu_flush(cache);

	if (unlikely(!cache->rq)) {
		int err;

		/* If we need to copy for the cmdparser, we will stall anyway */
		if (eb_use_cmdparser(eb))
			return ERR_PTR(-EWOULDBLOCK);

		if (!intel_engine_can_store_dword(eb->engine))
			return ERR_PTR(-ENODEV);

		err = __reloc_gpu_alloc(eb, vma, len);
		if (unlikely(err))
			return ERR_PTR(err);
	}

	cmd = cache->rq_cmd + cache->rq_size;
	cache->rq_size += len;

	return cmd;
}

static u64
relocate_entry(struct i915_vma *vma,
	       const struct drm_i915_gem_relocation_entry *reloc,
	       struct i915_execbuffer *eb,
	       const struct i915_vma *target)
{
	u64 offset = reloc->offset;
	u64 target_offset = relocation_target(reloc, target);
	bool wide = eb->reloc_cache.use_64bit_reloc;
	void *vaddr;

	if (!eb->reloc_cache.vaddr &&
	    (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
	     !reservation_object_test_signaled_rcu(vma->resv, true))) {
		const unsigned int gen = eb->reloc_cache.gen;
		unsigned int len;
		u32 *batch;
		u64 addr;

		if (wide)
			len = offset & 7 ? 8 : 5;
		else if (gen >= 4)
			len = 4;
		else
			len = 3;

		batch = reloc_gpu(eb, vma, len);
		if (IS_ERR(batch))
			goto repeat;

		addr = gen8_canonical_addr(vma->node.start + offset);
		if (wide) {
			if (offset & 7) {
				*batch++ = MI_STORE_DWORD_IMM_GEN4;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = lower_32_bits(target_offset);

				addr = gen8_canonical_addr(addr + 4);

				*batch++ = MI_STORE_DWORD_IMM_GEN4;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = upper_32_bits(target_offset);
			} else {
				*batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = lower_32_bits(target_offset);
				*batch++ = upper_32_bits(target_offset);
			}
		} else if (gen >= 6) {
			*batch++ = MI_STORE_DWORD_IMM_GEN4;
			*batch++ = 0;
			*batch++ = addr;
			*batch++ = target_offset;
		} else if (gen >= 4) {
			*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
			*batch++ = 0;
			*batch++ = addr;
			*batch++ = target_offset;
		} else {
			*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*batch++ = addr;
			*batch++ = target_offset;
		}

		goto out;
	}

repeat:
	vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	clflush_write32(vaddr + offset_in_page(offset),
			lower_32_bits(target_offset),
			eb->reloc_cache.vaddr);

	if (wide) {
		offset += sizeof(u32);
		target_offset >>= 32;
		wide = false;
		goto repeat;
	}

out:
	return target->node.start | UPDATE;
}

static u64
eb_relocate_entry(struct i915_execbuffer *eb,
		  struct i915_vma *vma,
		  const struct drm_i915_gem_relocation_entry *reloc)
{
	struct i915_vma *target;
	int err;

	/* we already hold a reference to all valid objects */
	target = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(!target))
		return -ENOENT;

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	if (reloc->write_domain) {
		*target->exec_flags |= EXEC_OBJECT_WRITE;

		/*
		 * Sandybridge PPGTT errata: We need a global gtt mapping
		 * for MI and pipe_control writes because the gpu doesn't
		 * properly redirect them through the ppgtt for non_secure
		 * batchbuffers.
		 */
		if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
		    IS_GEN6(eb->i915)) {
			err = i915_vma_bind(target, target->obj->cache_level,
					    PIN_GLOBAL);
			if (WARN_ONCE(err,
				      "Unexpected failure to bind target VMA!"))
				return err;
		}
	}

	/*
	 * If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (!DBG_FORCE_RELOC &&
	    gen8_canonical_addr(target->node.start) == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		     vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "target %d offset %d size %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset,
			  (int)vma->size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "target %d offset %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset);
		return -EINVAL;
	}

	/*
	 * If we write into the object, we need to force the synchronisation
	 * barrier, either with an asynchronous clflush or if we executed the
	 * patching using the GPU (though that should be serialised by the
	 * timeline). To be completely sure, and since doing the relocations
	 * means we are already stalling, disable the user's opt
	 * out of our synchronisation.
	 */
	*vma->exec_flags &= ~EXEC_OBJECT_ASYNC;

	/* and update the user's relocation entry */
	return relocate_entry(vma, reloc, eb, target);
}

static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *urelocs;
	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
	unsigned int remain;

	urelocs = u64_to_user_ptr(entry->relocs_ptr);
	remain = entry->relocation_count;
	if (unlikely(remain > N_RELOC(ULONG_MAX)))
		return -EINVAL;

	/*
	 * We must check that the entire relocation array is safe
	 * to read. However, if the array is not writable the user loses
	 * the updated relocation values.
	 */
	if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs))))
		return -EFAULT;

	do {
		struct drm_i915_gem_relocation_entry *r = stack;
		unsigned int count =
			min_t(unsigned int, remain, ARRAY_SIZE(stack));
		unsigned int copied;

		/*
		 * This is the fast path and we cannot handle a pagefault
		 * whilst holding the struct mutex lest the user pass in the
	 * relocations contained within a mmaped bo. In such a case,
	 * the page fault handler would call i915_gem_fault() and
		 * we would try to acquire the struct mutex again. Obviously
		 * this is bad and so lockdep complains vehemently.
		 */
		pagefault_disable();
		copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
		pagefault_enable();
		if (unlikely(copied)) {
			remain = -EFAULT;
			goto out;
		}

		remain -= count;
		do {
			u64 offset = eb_relocate_entry(eb, vma, r);

			if (likely(offset == 0)) {
			} else if ((s64)offset < 0) {
				remain = (int)offset;
				goto out;
			} else {
				/*
				 * Note that reporting an error now
				 * leaves everything in an inconsistent
				 * state as we have *already* changed
				 * the relocation value inside the
				 * object. As we have not changed the
				 * reloc.presumed_offset and will not
				 * change the execobject.offset, on a
				 * future call we may not rewrite the
				 * value inside the object, leaving it
				 * dangling and causing a GPU hang -
				 * unless userspace dynamically rebuilds
				 * the relocations on each execbuf rather
				 * than presuming a static tree.
				 *
				 * We did previously check if the relocations
				 * were writable (access_ok), an error now
				 * would be a strange race with mprotect,
				 * having already demonstrated that we
				 * can read from this userspace address.
				 */
				offset = gen8_canonical_addr(offset & ~UPDATE);
				__put_user(offset,
					   &urelocs[r-stack].presumed_offset);
			}
		} while (r++, --count);
		urelocs += ARRAY_SIZE(stack);
	} while (remain);
out:
	reloc_cache_reset(&eb->reloc_cache);
	return remain;
}

static int
eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma)
{
	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
	struct drm_i915_gem_relocation_entry *relocs =
		u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
	unsigned int i;
	int err;

	for (i = 0; i < entry->relocation_count; i++) {
		u64 offset = eb_relocate_entry(eb, vma, &relocs[i]);

		if ((s64)offset < 0) {
			err = (int)offset;
			goto err;
		}
	}
	err = 0;
err:
	reloc_cache_reset(&eb->reloc_cache);
	return err;
}

static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
{
	const char __user *addr, *end;
	unsigned long size;
	char __maybe_unused c;

	size = entry->relocation_count;
	if (size == 0)
		return 0;

	if (size > N_RELOC(ULONG_MAX))
		return -EINVAL;

	addr = u64_to_user_ptr(entry->relocs_ptr);
	size *= sizeof(struct drm_i915_gem_relocation_entry);
	if (!access_ok(VERIFY_READ, addr, size))
		return -EFAULT;

	end = addr + size;
	for (; addr < end; addr += PAGE_SIZE) {
		int err = __get_user(c, addr);
		if (err)
			return err;
	}
	return __get_user(c, end - 1);
}

static int eb_copy_relocations(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		const unsigned int nreloc = eb->exec[i].relocation_count;
		struct drm_i915_gem_relocation_entry __user *urelocs;
		struct drm_i915_gem_relocation_entry *relocs;
		unsigned long size;
		unsigned long copied;

		if (nreloc == 0)
			continue;

		err = check_relocations(&eb->exec[i]);
		if (err)
			goto err;

		urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
		size = nreloc * sizeof(*relocs);

		relocs = kvmalloc_array(size, 1, GFP_KERNEL);
		if (!relocs) {
			kvfree(relocs);
			err = -ENOMEM;
			goto err;
		}

		/* copy_from_user is limited to < 4GiB */
		copied = 0;
		do {
			unsigned int len =
				min_t(u64, BIT_ULL(31), size - copied);

			if (__copy_from_user((char *)relocs + copied,
					     (char __user *)urelocs + copied,
					     len)) {
				kvfree(relocs);
				err = -EFAULT;
				goto err;
			}

			copied += len;
		} while (copied < size);

		/*
		 * As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		user_access_begin();
		for (copied = 0; copied < nreloc; copied++)
			unsafe_put_user(-1,
					&urelocs[copied].presumed_offset,
					end_user);
end_user:
		user_access_end();

		eb->exec[i].relocs_ptr = (uintptr_t)relocs;
	}

	return 0;

err:
	while (i--) {
		struct drm_i915_gem_relocation_entry *relocs =
			u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
		if (eb->exec[i].relocation_count)
			kvfree(relocs);
	}
	return err;
}

static int eb_prefault_relocations(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;

	if (unlikely(i915_modparams.prefault_disable))
		return 0;

	for (i = 0; i < count; i++) {
		int err;

		err = check_relocations(&eb->exec[i]);
		if (err)
			return err;
	}

	return 0;
}

static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
{
	struct drm_device *dev = &eb->i915->drm;
	bool have_copy = false;
	struct i915_vma *vma;
	int err = 0;

repeat:
	if (signal_pending(current)) {
		err = -ERESTARTSYS;
		goto out;
	}

	/* We may process another execbuffer during the unlock... */
	eb_reset_vmas(eb);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * We take 3 passes through the slowpath.
	 *
	 * 1 - we try to just prefault all the user relocation entries and
	 * then attempt to reuse the atomic pagefault disabled fast path again.
	 *
	 * 2 - we copy the user entries to a local buffer here outside of the
	 * struct_mutex and allow ourselves to wait upon any rendering before
	 * relocations
	 *
	 * 3 - we already have a local copy of the relocation entries, but
	 * were interrupted (EAGAIN) whilst waiting for the objects, try again.
	 */
	if (!err) {
		err = eb_prefault_relocations(eb);
	} else if (!have_copy) {
		err = eb_copy_relocations(eb);
		have_copy = err == 0;
	} else {
		cond_resched();
		err = 0;
	}
	if (err) {
		mutex_lock(&dev->struct_mutex);
		goto out;
	}

	/* A frequent cause of EAGAIN is currently unavailable client pages */
	flush_workqueue(eb->i915->mm.userptr_wq);

	err = i915_mutex_lock_interruptible(dev);
	if (err) {
		mutex_lock(&dev->struct_mutex);
		goto out;
	}

	/* reacquire the objects */
	err = eb_lookup_vmas(eb);
	if (err)
		goto err;

	GEM_BUG_ON(!eb->batch);

	list_for_each_entry(vma, &eb->relocs, reloc_link) {
		if (!have_copy) {
			pagefault_disable();
			err = eb_relocate_vma(eb, vma);
			pagefault_enable();
			if (err)
				goto repeat;
		} else {
			err = eb_relocate_vma_slow(eb, vma);
			if (err)
				goto err;
		}
	}

	/*
	 * Leave the user relocations as they are: this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756
	if (err == -EAGAIN)
		goto repeat;

out:
	if (have_copy) {
		const unsigned int count = eb->buffer_count;
		unsigned int i;

		for (i = 0; i < count; i++) {
			const struct drm_i915_gem_exec_object2 *entry =
				&eb->exec[i];
			struct drm_i915_gem_relocation_entry *relocs;

			if (!entry->relocation_count)
				continue;

			relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
			kvfree(relocs);
		}
	}

	return err;
}

static int eb_relocate(struct i915_execbuffer *eb)
{
	if (eb_lookup_vmas(eb))
		goto slow;

	/* The objects are in their final locations, apply the relocations. */
	if (eb->args->flags & __EXEC_HAS_RELOC) {
		struct i915_vma *vma;

		list_for_each_entry(vma, &eb->relocs, reloc_link) {
			if (eb_relocate_vma(eb, vma))
				goto slow;
		}
	}

	return 0;

slow:
	return eb_relocate_slow(eb);
}

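/*
 * Serialise the request against every object in the execbuf: flush stale
 * CPU caches, await prior rendering (unless marked ASYNC) and then mark
 * each vma as active on behalf of the new request.
 */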
static int eb_move_to_gpu(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		unsigned int flags = eb->flags[i];
		struct i915_vma *vma = eb->vma[i];
		struct drm_i915_gem_object *obj = vma->obj;

		if (flags & EXEC_OBJECT_CAPTURE) {
			struct i915_capture_list *capture;

			capture = kmalloc(sizeof(*capture), GFP_KERNEL);
			if (unlikely(!capture))
				return -ENOMEM;

			capture->next = eb->request->capture_list;
			capture->vma = eb->vma[i];
			eb->request->capture_list = capture;
		}

		/*
		 * If the GPU is not _reading_ through the CPU cache, we need
		 * to make sure that any writes (both previous GPU writes from
		 * before a change in snooping levels and normal CPU writes)
		 * caught in that cache are flushed to main memory.
		 *
		 * We want to say
		 *   obj->cache_dirty &&
		 *   !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		 * but gcc's optimiser doesn't handle that as well and emits
		 * two jumps instead of one. Maybe one day...
		 */
		if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) {
			if (i915_gem_clflush_object(obj, 0))
				flags &= ~EXEC_OBJECT_ASYNC;
		}

		if (flags & EXEC_OBJECT_ASYNC)
			continue;

		err = i915_request_await_object
			(eb->request, obj, flags & EXEC_OBJECT_WRITE);
		if (err)
			return err;
	}

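	/*
	 * Second pass: with all waits queued, mark each vma active on this
	 * request and drop our local reservations and references.
	 */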
	for (i = 0; i < count; i++) {
		unsigned int flags = eb->flags[i];
		struct i915_vma *vma = eb->vma[i];

		err = i915_vma_move_to_active(vma, eb->request, flags);
		if (unlikely(err)) {
			i915_request_skip(eb->request, err);
			return err;
		}

		__eb_unreserve_vma(vma, flags);
		vma->exec_flags = NULL;

		if (unlikely(flags & __EXEC_OBJECT_HAS_REF))
			i915_vma_put(vma);
	}
	eb->exec = NULL;

	/* Unconditionally flush any chipset caches (for streaming writes). */
	i915_gem_chipset_flush(eb->i915);

	return 0;
}

static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
		return false;

	/* Kernel clipping was a DRI1 misfeature */
	if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) {
		if (exec->num_cliprects || exec->cliprects_ptr)
			return false;
	}

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return false;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return false;

	return true;
}

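/*
 * For I915_EXEC_GEN7_SOL_RESET, zero the four GEN7_SO_WRITE_OFFSET
 * registers: the LRI header, four reg/value pairs and a trailing NOOP
 * account for the 4 * 2 + 2 dwords reserved below.
 */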
static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
{
	u32 *cs;
	int i;

	if (!IS_GEN7(rq->i915) || rq->engine->id != RCS) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	cs = intel_ring_begin(rq, 4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(4);
	for (i = 0; i < 4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
		*cs++ = 0;
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}

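/*
 * Copy the batch through the command parser into a shadow object taken
 * from the engine's batch pool; on success the shadow is appended to the
 * execbuf object list so that it is retired along with the request.
 */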
static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
{
	struct drm_i915_gem_object *shadow_batch_obj;
	struct i915_vma *vma;
	int err;

	shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
						   PAGE_ALIGN(eb->batch_len));
	if (IS_ERR(shadow_batch_obj))
		return ERR_CAST(shadow_batch_obj);

	err = intel_engine_cmd_parser(eb->engine,
				      eb->batch->obj,
				      shadow_batch_obj,
				      eb->batch_start_offset,
				      eb->batch_len,
				      is_master);
	if (err) {
		if (err == -EACCES) /* unhandled chained batch */
			vma = NULL;
		else
			vma = ERR_PTR(err);
		goto out;
	}

	vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		goto out;

	eb->vma[eb->buffer_count] = i915_vma_get(vma);
	eb->flags[eb->buffer_count] =
		__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
	vma->exec_flags = &eb->flags[eb->buffer_count];
	eb->buffer_count++;

out:
	i915_gem_object_unpin_pages(shadow_batch_obj);
	return vma;
}

static void
add_to_client(struct i915_request *rq, struct drm_file *file)
{
	rq->file_priv = file->driver_priv;
	list_add_tail(&rq->client_link, &rq->file_priv->mm.request_list);
}

static int eb_submit(struct i915_execbuffer *eb)
{
	int err;

	err = eb_move_to_gpu(eb);
	if (err)
		return err;

	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
		err = i915_reset_gen7_sol_offsets(eb->request);
		if (err)
			return err;
	}

	err = eb->engine->emit_bb_start(eb->request,
					eb->batch->node.start +
					eb->batch_start_offset,
					eb->batch_len,
					eb->batch_flags);
	if (err)
		return err;

	return 0;
}

/*
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The engine index is returned.
 */
static unsigned int
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
			 struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_engine < 0)
		file_priv->bsd_engine = atomic_fetch_xor(1,
			 &dev_priv->mm.bsd_engine_dispatch_index);

	return file_priv->bsd_engine;
}

#define I915_USER_RINGS (4)

static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
	[I915_EXEC_DEFAULT]	= RCS,
	[I915_EXEC_RENDER]	= RCS,
	[I915_EXEC_BLT]		= BCS,
	[I915_EXEC_BSD]		= VCS,
	[I915_EXEC_VEBOX]	= VECS
};

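/* Resolve the user's ring selection (and any BSD submask) to an engine. */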
static struct intel_engine_cs *
eb_select_engine(struct drm_i915_private *dev_priv,
		 struct drm_file *file,
		 struct drm_i915_gem_execbuffer2 *args)
{
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
	struct intel_engine_cs *engine;

	if (user_ring_id > I915_USER_RINGS) {
		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
		return NULL;
	}

	if ((user_ring_id != I915_EXEC_BSD) &&
	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
		DRM_DEBUG("execbuf with non bsd ring but with invalid "
			  "bsd dispatch flags: %d\n", (int)(args->flags));
		return NULL;
	}

	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
			bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
			bsd_idx--;
		} else {
			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
				  bsd_idx);
			return NULL;
		}

		engine = dev_priv->engine[_VCS(bsd_idx)];
	} else {
		engine = dev_priv->engine[user_ring_map[user_ring_id]];
	}

	if (!engine) {
		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
		return NULL;
	}

	return engine;
}

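/*
 * With I915_EXEC_FENCE_ARRAY, cliprects_ptr/num_cliprects are reused to
 * carry an array of drm_i915_gem_exec_fence: each syncobj handle is
 * looked up and its WAIT/SIGNAL flags packed into the low bits of the
 * pointer (hence the ptr_pack_bits/ptr_mask_bits dance).
 */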
static void
__free_fence_array(struct drm_syncobj **fences, unsigned int n)
{
	while (n--)
		drm_syncobj_put(ptr_mask_bits(fences[n], 2));
	kvfree(fences);
}

static struct drm_syncobj **
get_fence_array(struct drm_i915_gem_execbuffer2 *args,
		struct drm_file *file)
{
	const unsigned long nfences = args->num_cliprects;
	struct drm_i915_gem_exec_fence __user *user;
	struct drm_syncobj **fences;
	unsigned long n;
	int err;

	if (!(args->flags & I915_EXEC_FENCE_ARRAY))
		return NULL;

	/* Check multiplication overflow for access_ok() and kvmalloc_array() */
	BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
	if (nfences > min_t(unsigned long,
			    ULONG_MAX / sizeof(*user),
			    SIZE_MAX / sizeof(*fences)))
		return ERR_PTR(-EINVAL);

	user = u64_to_user_ptr(args->cliprects_ptr);
	if (!access_ok(VERIFY_READ, user, nfences * sizeof(*user)))
		return ERR_PTR(-EFAULT);

	fences = kvmalloc_array(nfences, sizeof(*fences),
				__GFP_NOWARN | GFP_KERNEL);
	if (!fences)
		return ERR_PTR(-ENOMEM);

	for (n = 0; n < nfences; n++) {
		struct drm_i915_gem_exec_fence fence;
		struct drm_syncobj *syncobj;

		if (__copy_from_user(&fence, user++, sizeof(fence))) {
			err = -EFAULT;
			goto err;
		}

		if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
			err = -EINVAL;
			goto err;
		}

		syncobj = drm_syncobj_find(file, fence.handle);
		if (!syncobj) {
			DRM_DEBUG("Invalid syncobj handle provided\n");
			err = -ENOENT;
			goto err;
		}

		BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
			     ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);

		fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
	}

	return fences;

err:
	__free_fence_array(fences, n);
	return ERR_PTR(err);
}

static void
put_fence_array(struct drm_i915_gem_execbuffer2 *args,
		struct drm_syncobj **fences)
{
	if (fences)
		__free_fence_array(fences, args->num_cliprects);
}

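/* Queue waits on every syncobj marked I915_EXEC_FENCE_WAIT. */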
static int
await_fence_array(struct i915_execbuffer *eb,
		  struct drm_syncobj **fences)
{
	const unsigned int nfences = eb->args->num_cliprects;
	unsigned int n;
	int err;

	for (n = 0; n < nfences; n++) {
		struct drm_syncobj *syncobj;
		struct dma_fence *fence;
		unsigned int flags;

		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
		if (!(flags & I915_EXEC_FENCE_WAIT))
			continue;

		fence = drm_syncobj_fence_get(syncobj);
		if (!fence)
			return -EINVAL;

		err = i915_request_await_dma_fence(eb->request, fence);
		dma_fence_put(fence);
		if (err < 0)
			return err;
	}

	return 0;
}

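/* Install this request's fence into every I915_EXEC_FENCE_SIGNAL syncobj. */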
static void
signal_fence_array(struct i915_execbuffer *eb,
		   struct drm_syncobj **fences)
{
	const unsigned int nfences = eb->args->num_cliprects;
	struct dma_fence * const fence = &eb->request->fence;
	unsigned int n;

	for (n = 0; n < nfences; n++) {
		struct drm_syncobj *syncobj;
		unsigned int flags;

		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
		if (!(flags & I915_EXEC_FENCE_SIGNAL))
			continue;

		drm_syncobj_replace_fence(syncobj, fence);
	}
}

static int
i915_gem_do_execbuffer(struct drm_device *dev,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec,
		       struct drm_syncobj **fences)
{
	struct i915_execbuffer eb;
	struct dma_fence *in_fence = NULL;
	struct sync_file *out_fence = NULL;
	int out_fence_fd = -1;
	int err;

	BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS);
	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);

	eb.i915 = to_i915(dev);
	eb.file = file;
	eb.args = args;
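	/*
	 * Unless userspace promises (NO_RELOC) that every presumed_offset
	 * is already correct, assume there are relocations to process.
	 */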
	if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
		args->flags |= __EXEC_HAS_RELOC;

	eb.exec = exec;
	eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1);
	eb.vma[0] = NULL;
	eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1);

	eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	reloc_cache_init(&eb.reloc_cache, eb.i915);

	eb.buffer_count = args->buffer_count;
	eb.batch_start_offset = args->batch_start_offset;
	eb.batch_len = args->batch_len;

	eb.batch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		eb.batch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		eb.batch_flags |= I915_DISPATCH_PINNED;

	eb.engine = eb_select_engine(eb.i915, file, args);
	if (!eb.engine)
		return -EINVAL;

	if (args->flags & I915_EXEC_FENCE_IN) {
		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!in_fence)
			return -EINVAL;
	}

	if (args->flags & I915_EXEC_FENCE_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			err = out_fence_fd;
			goto err_in_fence;
		}
	}

	err = eb_create(&eb);
	if (err)
		goto err_out_fence;

	GEM_BUG_ON(!eb.lut_size);

	err = eb_select_context(&eb);
	if (unlikely(err))
		goto err_destroy;

	/*
	 * Take a local wakeref for preparing to dispatch the execbuf as
	 * we expect to access the hardware fairly frequently in the
	 * process. Upon first dispatch, we acquire another prolonged
	 * wakeref that we hold until the GPU has been idle for at least
	 * 100ms.
	 */
	intel_runtime_pm_get(eb.i915);

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto err_rpm;

	err = eb_relocate(&eb);
	if (err) {
		/*
		 * If the user expects the execobject.offset and
		 * reloc.presumed_offset to be an exact match,
		 * as for using NO_RELOC, then we cannot update
		 * the execobject.offset until we have completed
		 * relocation.
		 */
		args->flags &= ~__EXEC_HAS_RELOC;
		goto err_vma;
	}

	if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		err = -EINVAL;
		goto err_vma;
	}
	if (eb.batch_start_offset > eb.batch->size ||
	    eb.batch_len > eb.batch->size - eb.batch_start_offset) {
		DRM_DEBUG("Attempting to use out-of-bounds batch\n");
		err = -EINVAL;
		goto err_vma;
	}

	if (eb_use_cmdparser(&eb)) {
		struct i915_vma *vma;

		vma = eb_parse(&eb, drm_is_current_master(file));
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_vma;
		}

		if (vma) {
			/*
			 * Batch parsed and accepted:
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
			 * bit from MI_BATCH_BUFFER_START commands issued in
			 * the dispatch_execbuffer implementations. We
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
			eb.batch_flags |= I915_DISPATCH_SECURE;
			eb.batch_start_offset = 0;
			eb.batch = vma;
		}
	}

	if (eb.batch_len == 0)
		eb.batch_len = eb.batch->size - eb.batch_start_offset;

	/*
	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again.
	 */
	if (eb.batch_flags & I915_DISPATCH_SECURE) {
		struct i915_vma *vma;

		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_vma;
		}

		eb.batch = vma;
	}

	/* All GPU relocation batches must be submitted prior to the user rq */
	GEM_BUG_ON(eb.reloc_cache.rq);

	/* Allocate a request for this batch buffer nice and early. */
	eb.request = i915_request_alloc(eb.engine, eb.ctx);
	if (IS_ERR(eb.request)) {
		err = PTR_ERR(eb.request);
		goto err_batch_unpin;
	}

	if (in_fence) {
		err = i915_request_await_dma_fence(eb.request, in_fence);
		if (err < 0)
			goto err_request;
	}

	if (fences) {
		err = await_fence_array(&eb, fences);
		if (err)
			goto err_request;
	}

	if (out_fence_fd != -1) {
		out_fence = sync_file_create(&eb.request->fence);
		if (!out_fence) {
			err = -ENOMEM;
			goto err_request;
		}
	}

	/*
	 * Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	eb.request->batch = eb.batch;

	trace_i915_request_queue(eb.request, eb.batch_flags);
	err = eb_submit(&eb);
err_request:
	i915_request_add(eb.request);
	add_to_client(eb.request, file);

	if (fences)
		signal_fence_array(&eb, fences);

	if (out_fence) {
		if (err == 0) {
			fd_install(out_fence_fd, out_fence->file);
			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
			args->rsvd2 |= (u64)out_fence_fd << 32;
			out_fence_fd = -1;
		} else {
			fput(out_fence->file);
		}
	}

err_batch_unpin:
	if (eb.batch_flags & I915_DISPATCH_SECURE)
		i915_vma_unpin(eb.batch);
err_vma:
	if (eb.exec)
		eb_release_vmas(&eb);
	mutex_unlock(&dev->struct_mutex);
err_rpm:
	intel_runtime_pm_put(eb.i915);
	i915_gem_context_put(eb.ctx);
err_destroy:
	eb_destroy(&eb);
err_out_fence:
	if (out_fence_fd != -1)
		put_unused_fd(out_fence_fd);
err_in_fence:
	dma_fence_put(in_fence);
	return err;
}

static size_t eb_element_size(void)
{
	return (sizeof(struct drm_i915_gem_exec_object2) +
		sizeof(struct i915_vma *) +
		sizeof(unsigned int));
}

static bool check_buffer_count(size_t count)
{
	const size_t sz = eb_element_size();

	/*
	 * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup
	 * array size (see eb_create()). Otherwise, we can accept an array as
	 * large as can be addressed (though use large arrays at your peril)!
	 */

	return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1);
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	const size_t count = args->buffer_count;
	unsigned int i;
	int err;

	if (!check_buffer_count(count)) {
		DRM_DEBUG("execbuf2 with %zd buffers\n", count);
		return -EINVAL;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	if (!i915_gem_check_execbuffer(&exec2))
		return -EINVAL;

	/* Copy in the exec list from userland */
	exec_list = kvmalloc_array(count, sizeof(*exec_list),
				   __GFP_NOWARN | GFP_KERNEL);
	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -ENOMEM;
	}
	err = copy_from_user(exec_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * count);
	if (err) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, err);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -EFAULT;
	}

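	/* Translate each legacy exec_object entry into exec_object2 form. */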
	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_GEN(to_i915(dev)) < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list, NULL);
	if (exec2.flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
			exec2_list[i].offset &= PIN_OFFSET_MASK;
			if (__copy_to_user(&user_exec_list[i].offset,
					   &exec2_list[i].offset,
					   sizeof(user_exec_list[i].offset)))
				break;
		}
	}

	kvfree(exec_list);
	kvfree(exec2_list);
	return err;
}

int
i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list;
	struct drm_syncobj **fences = NULL;
	const size_t count = args->buffer_count;
	int err;

	if (!check_buffer_count(count)) {
		DRM_DEBUG("execbuf2 with %zd buffers\n", count);
		return -EINVAL;
	}

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	/* Allocate an extra slot for use by the command parser */
	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %zd buffers\n",
			  count);
		return -ENOMEM;
	}
	if (copy_from_user(exec2_list,
			   u64_to_user_ptr(args->buffers_ptr),
			   sizeof(*exec2_list) * count)) {
		DRM_DEBUG("copy %zd exec entries failed\n", count);
		kvfree(exec2_list);
		return -EFAULT;
	}

	if (args->flags & I915_EXEC_FENCE_ARRAY) {
		fences = get_fence_array(args, file);
		if (IS_ERR(fences)) {
			kvfree(exec2_list);
			return PTR_ERR(fences);
		}
	}

	err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences);

	/*
	 * Now that we have begun execution of the batchbuffer, we ignore
	 * any new error after this point. Also given that we have already
	 * updated the associated relocations, we try to write out the current
	 * object locations irrespective of any error.
	 */
	if (args->flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);
		unsigned int i;

		/* Copy the new buffer offsets back to the user's exec list. */
		user_access_begin();
		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
			unsafe_put_user(exec2_list[i].offset,
					&user_exec_list[i].offset,
					end_user);
		}
end_user:
		user_access_end();
	}

	args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
	put_fence_array(args, fences);
	kvfree(exec2_list);
	return err;
}