/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <linux/intel-iommu.h>
#include <linux/reservation.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_syncobj.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

enum {
	FORCE_CPU_RELOC = 1,
	FORCE_GTT_RELOC,
	FORCE_GPU_RELOC,
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
};

#define __EXEC_OBJECT_HAS_REF		BIT(31)
#define __EXEC_OBJECT_HAS_PIN		BIT(30)
#define __EXEC_OBJECT_HAS_FENCE		BIT(29)
#define __EXEC_OBJECT_NEEDS_MAP		BIT(28)
#define __EXEC_OBJECT_NEEDS_BIAS	BIT(27)
#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 27) /* all of the above */
#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)

#define __EXEC_HAS_RELOC	BIT(31)
#define __EXEC_VALIDATED	BIT(30)
#define __EXEC_INTERNAL_FLAGS	(~0u << 30)
#define UPDATE			PIN_OFFSET_FIXED

#define BATCH_OFFSET_BIAS (256*1024)

#define __I915_EXEC_ILLEGAL_FLAGS \
	(__I915_EXEC_UNKNOWN_FLAGS | \
	 I915_EXEC_CONSTANTS_MASK  | \
	 I915_EXEC_RESOURCE_STREAMER)

/* Catch emission of unexpected errors for CI! */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#undef EINVAL
#define EINVAL ({ \
	DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
	22; \
})
#endif

/**
 * DOC: User command execution
 *
 * Userspace submits commands to be executed on the GPU as an instruction
 * stream within a GEM object we call a batchbuffer. These instructions may
 * refer to other GEM objects containing auxiliary state such as kernels,
 * samplers, render targets and even secondary batchbuffers. Userspace does
 * not know where in the GPU memory these objects reside and so before the
 * batchbuffer is passed to the GPU for execution, those addresses in the
 * batchbuffer and auxiliary objects are updated. This is known as relocation,
 * or patching. To try and avoid having to relocate each object on the next
 * execution, userspace is told the location of those objects in this pass,
 * but this remains just a hint as the kernel may choose a new location for
 * any object in the future.
 *
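 * For illustration only (all values made up), a single relocation entry that
 * patches a surface pointer at byte 256 of the batch might look like:
 *
 *	struct drm_i915_gem_relocation_entry reloc = {
 *		.target_handle   = 5,         /* GEM handle of the target bo */
 *		.delta           = 0,         /* byte offset inside the target */
 *		.offset          = 256,       /* location to patch in the batch */
 *		.presumed_offset = 0x100000,  /* where userspace thinks it is */
 *		.read_domains    = I915_GEM_DOMAIN_RENDER,
 *		.write_domain    = 0,
 *	};
 *
 * If the target ends up anywhere other than presumed_offset, the kernel
 * rewrites the 4 (or 8) bytes at reloc.offset with the target's real GPU
 * address plus reloc.delta.
 *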
 * At the level of talking to the hardware, submitting a batchbuffer for the
 * GPU to execute amounts to adding content to a buffer from which the HW
 * command streamer is reading; a rough sketch of the resulting commands
 * follows the numbered steps below.
 *
 * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e.
 *    Execlists, this command is not placed on the same buffer as the
 *    remaining items.
 *
 * 2. Add a command to invalidate caches to the buffer.
 *
 * 3. Add a batchbuffer start command to the buffer; the start command is
 *    essentially a token together with the GPU address of the batchbuffer
 *    to be executed.
 *
 * 4. Add a pipeline flush to the buffer.
 *
 * 5. Add a memory write command to the buffer to record when the GPU
 *    is done executing the batchbuffer. The memory write writes the
 *    global sequence number of the request, ``i915_request::global_seqno``;
 *    the i915 driver uses the current value in the register to determine
 *    if the GPU has completed the batchbuffer.
 *
 * 6. Add a user interrupt command to the buffer. This command instructs
 *    the GPU to issue an interrupt when the command, pipeline flush and
 *    memory write are completed.
 *
 * 7. Inform the hardware of the additional commands added to the buffer
 *    (by updating the tail pointer).
 *
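 * For a legacy ring engine the net effect is, very roughly and with the
 * exact commands varying by generation and engine:
 *
 *	MI_FLUSH / PIPE_CONTROL            (invalidate caches)
 *	MI_BATCH_BUFFER_START + address    (jump into the user batch)
 *	PIPE_CONTROL                       (flush the pipeline)
 *	MI_STORE_DWORD_IMM + global_seqno  (record completion)
 *	MI_USER_INTERRUPT
 *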
 * Processing an execbuf ioctl is conceptually split up into a few phases.
 *
 * 1. Validation - Ensure all the pointers, handles and flags are valid.
 * 2. Reservation - Assign GPU address space for every object
 * 3. Relocation - Update any addresses to point to the final locations
 * 4. Serialisation - Order the request with respect to its dependencies
 * 5. Construction - Construct a request to execute the batchbuffer
 * 6. Submission (at some point in the future execution)
 *
 * Reserving resources for the execbuf is the most complicated phase. We
 * neither want to have to migrate the object in the address space, nor do
 * we want to have to update any relocations pointing to this object. Ideally,
 * we want to leave the object where it is and for all the existing relocations
 * to match. If the object is given a new address, or if userspace thinks the
 * object is elsewhere, we have to parse all the relocation entries and update
 * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
 * all the target addresses in all of its objects match the value in the
 * relocation entries and that they all match the presumed offsets given by the
 * list of execbuffer objects. Using this knowledge, we know that if we haven't
 * moved any buffers, all the relocation entries are valid and we can skip
 * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
 * hang.) The requirements for using I915_EXEC_NO_RELOC are:
 *
 *      The addresses written in the objects must match the corresponding
 *      reloc.presumed_offset which in turn must match the corresponding
 *      execobject.offset.
 *
 *      Any render targets written to in the batch must be flagged with
 *      EXEC_OBJECT_WRITE.
 *
 *      To avoid stalling, execobject.offset should match the current
 *      address of that object within the active context.
 *
 * The reservation is done in multiple phases. First we try to keep any
 * object already bound at its current location - so long as it meets the
 * constraints imposed by the new execbuffer. Any object left unbound after the
 * first pass is then fitted into any available idle space. If an object does
 * not fit, all objects are removed from the reservation and the process rerun
 * after sorting the objects into a priority order (more difficult to fit
 * objects are tried first). Failing that, the entire VM is cleared and we try
 * to fit the execbuf one last time before concluding that it simply will not
 * fit.
 *
 * A small complication to all of this is that we allow userspace not only to
 * specify an alignment and a size for the object in the address space, but
 * we also allow userspace to specify the exact offset. Such objects are
 * simpler to place (the location is known a priori); all we have to do is make
 * sure the space is available.
 *
 * Once all the objects are in place, patching up the buried pointers to point
 * to the final locations is a fairly simple job of walking over the relocation
 * entry arrays, looking up the right address and rewriting the value into
 * the object. Simple! ... The relocation entries are stored in user memory
 * and so to access them we have to copy them into a local buffer. That copy
 * has to avoid taking any pagefaults as they may lead back to a GEM object
 * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
 * the relocation into multiple passes. First we try to do everything within an
 * atomic context (avoid the pagefaults) which requires that we never wait. If
 * we detect that we may wait, or if we need to fault, then we have to fallback
 * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
 * bells yet?) Dropping the mutex means that we lose all the state we have
 * built up so far for the execbuf and we must reset any global data. However,
 * we do leave the objects pinned in their final locations - which is a
 * potential issue for concurrent execbufs. Once we have left the mutex, we can
 * allocate and copy all the relocation entries into a large array at our
 * leisure, reacquire the mutex, reclaim all the objects and other state and
 * then proceed to update any incorrect addresses with the objects.
 *
 * As we process the relocation entries, we maintain a record of whether the
 * object is being written to. Using NORELOC, we expect userspace to provide
 * this information instead. We also check whether we can skip the relocation
 * by comparing the expected value inside the relocation entry with the target's
 * final address. If they differ, we have to map the current object and rewrite
 * the 4 or 8 byte pointer within.
 *
 * Serialising an execbuf is quite simple according to the rules of the GEM
 * ABI. Execution within each context is ordered by the order of submission.
 * Writes to any GEM object are in order of submission and are exclusive. Reads
 * from a GEM object are unordered with respect to other reads, but ordered by
 * writes. A write submitted after a read cannot occur before the read, and
 * similarly any read submitted after a write cannot occur before the write.
 * Writes are ordered between engines such that only one write occurs at any
 * time (completing any reads beforehand) - using semaphores where available
 * and CPU serialisation otherwise. Other GEM accesses obey the same rules: any
 * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
 * reads before starting, and any read (either using set-domain or pread) must
 * flush all GPU writes before starting. (Note we only employ a barrier before,
 * we currently rely on userspace not concurrently starting a new execution
 * whilst reading or writing to an object. This may be an advantage or not
 * depending on how much you trust userspace not to shoot themselves in the
 * foot.) Serialisation may just result in the request being inserted into
 * a DAG awaiting its turn, but most simple is to wait on the CPU until
 * all dependencies are resolved.
 *
 * After all of that, it is just a matter of closing the request and handing it to
 * the hardware (well, leaving it in a queue to be executed). However, we also
 * offer the ability for batchbuffers to be run with elevated privileges so
 * that they access otherwise hidden registers. (Used to adjust L3 cache etc.)
 * Before any batch is given extra privileges we first must check that it
 * contains no nefarious instructions: we check that each instruction is from
 * our whitelist and all registers are also from an allowed list. We first
 * copy the user's batchbuffer to a shadow (so that the user doesn't have
 * access to it, either by the CPU or GPU as we scan it) and then parse each
 * instruction. If everything is ok, we set a flag telling the hardware to run
 * the batchbuffer in trusted mode, otherwise the ioctl is rejected.
 */
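
/*
 * Purely as an illustrative sketch (not a complete or canonical userspace
 * sequence; handles, offsets, a context and the batch contents are assumed
 * to exist already, drmIoctl() is libdrm's ioctl wrapper, and the batch
 * object is placed last as required without I915_EXEC_BATCH_FIRST), a
 * minimal submission honouring the NO_RELOC contract described above could
 * look like:
 *
 *	struct drm_i915_gem_exec_object2 exec[2] = {
 *		{ .handle = target_handle,
 *		  .offset = presumed_target_offset,
 *		  .flags  = EXEC_OBJECT_WRITE },
 *		{ .handle = batch_handle,
 *		  .offset = presumed_batch_offset },
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr  = (uintptr_t)exec,
 *		.buffer_count = 2,
 *		.batch_len    = batch_len,
 *		.flags        = I915_EXEC_RENDER | I915_EXEC_NO_RELOC |
 *				I915_EXEC_HANDLE_LUT,
 *		.rsvd1        = context_id,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *
 * On return the kernel reports, via each execobject.offset, the address it
 * actually used, which userspace must remember for its next submission.
 */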

struct i915_execbuffer {
	struct drm_i915_private *i915; /** i915 backpointer */
	struct drm_file *file; /** per-file lookup tables and limits */
	struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
	struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
	struct i915_vma **vma;
	unsigned int *flags;

	struct intel_engine_cs *engine; /** engine to queue the request to */
	struct i915_gem_context *ctx; /** context for building the request */
	struct i915_address_space *vm; /** GTT and vma for the request */

	struct i915_request *request; /** our request to build */
	struct i915_vma *batch; /** identity of the batch obj/vma */

	/** actual size of execobj[] as we may extend it for the cmdparser */
	unsigned int buffer_count;

	/** list of vma not yet bound during reservation phase */
	struct list_head unbound;

	/** list of vma that have execobj.relocation_count */
	struct list_head relocs;

	/**
	 * Track the most recently used object for relocations, as we
	 * frequently have to perform multiple relocations within the same
	 * obj/page
	 */
	struct reloc_cache {
		struct drm_mm_node node; /** temporary GTT binding */
		unsigned long vaddr; /** Current kmap address */
		unsigned long page; /** Currently mapped page index */
		unsigned int gen; /** Cached value of INTEL_GEN */
		bool use_64bit_reloc : 1;
		bool has_llc : 1;
		bool has_fence : 1;
		bool needs_unfenced : 1;

		struct i915_request *rq;
		u32 *rq_cmd;
		unsigned int rq_size;
	} reloc_cache;

	u64 invalid_flags; /** Set of execobj.flags that are invalid */
	u32 context_flags; /** Set of execobj.flags to insert from the ctx */

	u32 batch_start_offset; /** Location within object of batch */
	u32 batch_len; /** Length of batch within object */
	u32 batch_flags; /** Flags composed for emit_bb_start() */

	/**
	 * Indicate either the size of the hashtable used to resolve
	 * relocation handles, or if negative that we are using a direct
	 * index into the execobj[].
	 */
	int lut_size;
	struct hlist_head *buckets; /** ht for relocation handles */
};

#define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags])

/*
 * Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
#define GEN8_HIGH_ADDRESS_BIT 47
static inline u64 gen8_canonical_addr(u64 address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline u64 gen8_noncanonical_addr(u64 address)
{
	return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
}
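
/*
 * Worked example (purely illustrative): with bit 47 as the sign bit,
 * gen8_canonical_addr(0x0000800000000000) == 0xffff800000000000, while
 * 0x00007ffffffff000 is already canonical and is returned unchanged;
 * gen8_noncanonical_addr() simply masks the copies of bit 47 back out of
 * bits [63:48].
 */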

static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
	return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len;
}

static int eb_create(struct i915_execbuffer *eb)
{
	if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
		unsigned int size = 1 + ilog2(eb->buffer_count);

		/*
		 * Without a 1:1 association between relocation handles and
		 * the execobject[] index, we instead create a hashtable.
		 * We size it dynamically based on available memory, starting
		 * first with a 1:1 associative hash and scaling back until
		 * the allocation succeeds.
		 *
		 * Later on we use a positive lut_size to indicate we are
		 * using this hashtable, and a negative value to indicate a
		 * direct lookup.
		 */
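		/* For example (illustrative): buffer_count = 100 gives
		 * size = 1 + ilog2(100) = 7, i.e. 128 buckets on the first
		 * attempt, halved on each failed allocation.
		 */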
		do {
			gfp_t flags;

			/* While we can still reduce the allocation size, don't
			 * raise a warning and allow the allocation to fail.
			 * On the last pass though, we want to try as hard
			 * as possible to perform the allocation and warn
			 * if it fails.
			 */
			flags = GFP_KERNEL;
			if (size > 1)
				flags |= __GFP_NORETRY | __GFP_NOWARN;

			eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
					      flags);
			if (eb->buckets)
				break;
		} while (--size);

		if (unlikely(!size))
			return -ENOMEM;

		eb->lut_size = size;
	} else {
		eb->lut_size = -eb->buffer_count;
	}

	return 0;
}

static bool
eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
		 const struct i915_vma *vma,
		 unsigned int flags)
{
	if (vma->node.size < entry->pad_to_size)
		return true;

	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
		return true;

	if (flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	if (flags & __EXEC_OBJECT_NEEDS_MAP &&
	    !i915_vma_is_map_and_fenceable(vma))
		return true;

	return false;
}

static inline bool
eb_pin_vma(struct i915_execbuffer *eb,
	   const struct drm_i915_gem_exec_object2 *entry,
	   struct i915_vma *vma)
{
	unsigned int exec_flags = *vma->exec_flags;
	u64 pin_flags;

	if (vma->node.size)
		pin_flags = vma->node.start;
	else
		pin_flags = entry->offset & PIN_OFFSET_MASK;

	pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_GTT))
		pin_flags |= PIN_GLOBAL;

	if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
		return false;

	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
		if (unlikely(i915_vma_pin_fence(vma))) {
			i915_vma_unpin(vma);
			return false;
		}

		if (vma->fence)
			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
	return !eb_vma_misplaced(entry, vma, exec_flags);
}

static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
{
	GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));

	if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
		__i915_vma_unpin_fence(vma);

	__i915_vma_unpin(vma);
}

static inline void
eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags)
{
	if (!(*flags & __EXEC_OBJECT_HAS_PIN))
		return;

	__eb_unreserve_vma(vma, *flags);
	*flags &= ~__EXEC_OBJECT_RESERVED;
}

static int
eb_validate_vma(struct i915_execbuffer *eb,
		struct drm_i915_gem_exec_object2 *entry,
		struct i915_vma *vma)
{
	if (unlikely(entry->flags & eb->invalid_flags))
		return -EINVAL;

	if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
		return -EINVAL;

	/*
	 * Offset can be used as input (EXEC_OBJECT_PINNED), reject
	 * any non-page-aligned or non-canonical addresses.
	 */
	if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
		     entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
		return -EINVAL;

	/* pad_to_size was once a reserved field, so sanitize it */
	if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
		if (unlikely(offset_in_page(entry->pad_to_size)))
			return -EINVAL;
	} else {
		entry->pad_to_size = 0;
	}

	if (unlikely(vma->exec_flags)) {
		DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n",
			  entry->handle, (int)(entry - eb->exec));
		return -EINVAL;
	}

	/*
	 * From drm_mm perspective address space is continuous,
	 * so from this point we're always using non-canonical
	 * form internally.
	 */
	entry->offset = gen8_noncanonical_addr(entry->offset);

	if (!eb->reloc_cache.has_fence) {
		entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
	} else {
		if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
		     eb->reloc_cache.needs_unfenced) &&
		    i915_gem_object_is_tiled(vma->obj))
			entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
	}

	if (!(entry->flags & EXEC_OBJECT_PINNED))
		entry->flags |= eb->context_flags;

	return 0;
}

static int
eb_add_vma(struct i915_execbuffer *eb,
	   unsigned int i, unsigned batch_idx,
	   struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
	int err;

	GEM_BUG_ON(i915_vma_is_closed(vma));

	if (!(eb->args->flags & __EXEC_VALIDATED)) {
		err = eb_validate_vma(eb, entry, vma);
		if (unlikely(err))
			return err;
	}

	if (eb->lut_size > 0) {
		vma->exec_handle = entry->handle;
		hlist_add_head(&vma->exec_node,
			       &eb->buckets[hash_32(entry->handle,
						    eb->lut_size)]);
	}

	if (entry->relocation_count)
		list_add_tail(&vma->reloc_link, &eb->relocs);

	/*
	 * Stash a pointer from the vma to execobj, so we can query its flags,
	 * size, alignment etc as provided by the user. Also we stash a pointer
	 * to the vma inside the execobj so that we can use a direct lookup
	 * to find the right target VMA when doing relocations.
	 */
	eb->vma[i] = vma;
	eb->flags[i] = entry->flags;
	vma->exec_flags = &eb->flags[i];

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if (i == batch_idx) {
		if (entry->relocation_count &&
		    !(eb->flags[i] & EXEC_OBJECT_PINNED))
			eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
		if (eb->reloc_cache.has_fence)
			eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;

		eb->batch = vma;
	}

	err = 0;
	if (eb_pin_vma(eb, entry, vma)) {
		if (entry->offset != vma->node.start) {
			entry->offset = vma->node.start | UPDATE;
			eb->args->flags |= __EXEC_HAS_RELOC;
		}
	} else {
		eb_unreserve_vma(vma, vma->exec_flags);

		list_add_tail(&vma->exec_link, &eb->unbound);
		if (drm_mm_node_allocated(&vma->node))
			err = i915_vma_unbind(vma);
		if (unlikely(err))
			vma->exec_flags = NULL;
	}
	return err;
}

static inline int use_cpu_reloc(const struct reloc_cache *cache,
				const struct drm_i915_gem_object *obj)
{
	if (!i915_gem_object_has_struct_page(obj))
		return false;

	if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
		return true;

	if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
		return false;

	return (cache->has_llc ||
		obj->cache_dirty ||
		obj->cache_level != I915_CACHE_NONE);
}

static int eb_reserve_vma(const struct i915_execbuffer *eb,
			  struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
	unsigned int exec_flags = *vma->exec_flags;
	u64 pin_flags;
	int err;

	pin_flags = PIN_USER | PIN_NONBLOCK;
	if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
		pin_flags |= PIN_GLOBAL;

	/*
	 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
	 * limit address to the first 4GBs for unflagged objects.
	 */
	if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
		pin_flags |= PIN_ZONE_4G;

	if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
		pin_flags |= PIN_MAPPABLE;

	if (exec_flags & EXEC_OBJECT_PINNED) {
		pin_flags |= entry->offset | PIN_OFFSET_FIXED;
		pin_flags &= ~PIN_NONBLOCK; /* force overlapping checks */
	} else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) {
		pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
	}

	err = i915_vma_pin(vma,
			   entry->pad_to_size, entry->alignment,
			   pin_flags);
	if (err)
		return err;

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start | UPDATE;
		eb->args->flags |= __EXEC_HAS_RELOC;
	}

	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
		err = i915_vma_pin_fence(vma);
		if (unlikely(err)) {
			i915_vma_unpin(vma);
			return err;
		}

		if (vma->fence)
			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
	GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags));

	return 0;
}

static int eb_reserve(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	struct list_head last;
	struct i915_vma *vma;
	unsigned int i, pass;
	int err;

	/*
	 * Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */

	pass = 0;
	err = 0;
	do {
		list_for_each_entry(vma, &eb->unbound, exec_link) {
			err = eb_reserve_vma(eb, vma);
			if (err)
				break;
		}
		if (err != -ENOSPC)
			return err;

		/* Resort *all* the objects into priority order */
		INIT_LIST_HEAD(&eb->unbound);
		INIT_LIST_HEAD(&last);
		for (i = 0; i < count; i++) {
			unsigned int flags = eb->flags[i];
			struct i915_vma *vma = eb->vma[i];

			if (flags & EXEC_OBJECT_PINNED &&
			    flags & __EXEC_OBJECT_HAS_PIN)
				continue;

			eb_unreserve_vma(vma, &eb->flags[i]);

			if (flags & EXEC_OBJECT_PINNED)
				/* Pinned must have their slot */
				list_add(&vma->exec_link, &eb->unbound);
			else if (flags & __EXEC_OBJECT_NEEDS_MAP)
				/* Map require the lowest 256MiB (aperture) */
				list_add_tail(&vma->exec_link, &eb->unbound);
			else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
				/* Prioritise 4GiB region for restricted bo */
				list_add(&vma->exec_link, &last);
			else
				list_add_tail(&vma->exec_link, &last);
		}
		list_splice_tail(&last, &eb->unbound);

		switch (pass++) {
		case 0:
			break;

		case 1:
			/* Too fragmented, unbind everything and retry */
			err = i915_gem_evict_vm(eb->vm);
			if (err)
				return err;
			break;

		default:
			return -ENOSPC;
		}
	} while (1);
}

static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
{
	if (eb->args->flags & I915_EXEC_BATCH_FIRST)
		return 0;
	else
		return eb->buffer_count - 1;
}

static int eb_select_context(struct i915_execbuffer *eb)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
	if (unlikely(!ctx))
		return -ENOENT;

	eb->ctx = ctx;
	if (ctx->ppgtt) {
		eb->vm = &ctx->ppgtt->vm;
		eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
	} else {
		eb->vm = &eb->i915->ggtt.vm;
	}

	eb->context_flags = 0;
	if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
		eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return 0;
}

static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
	struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
	struct drm_i915_gem_object *obj;
	unsigned int i, batch;
	int err;

	if (unlikely(i915_gem_context_is_closed(eb->ctx)))
		return -ENOENT;

	if (unlikely(i915_gem_context_is_banned(eb->ctx)))
		return -EIO;

	INIT_LIST_HEAD(&eb->relocs);
	INIT_LIST_HEAD(&eb->unbound);

	batch = eb_batch_index(eb);

	for (i = 0; i < eb->buffer_count; i++) {
		u32 handle = eb->exec[i].handle;
		struct i915_lut_handle *lut;
		struct i915_vma *vma;

		vma = radix_tree_lookup(handles_vma, handle);
		if (likely(vma))
			goto add_vma;

		obj = i915_gem_object_lookup(eb->file, handle);
		if (unlikely(!obj)) {
			err = -ENOENT;
			goto err_vma;
		}

		vma = i915_vma_instance(obj, eb->vm, NULL);
		if (unlikely(IS_ERR(vma))) {
			err = PTR_ERR(vma);
			goto err_obj;
		}

		lut = kmem_cache_alloc(eb->i915->luts, GFP_KERNEL);
		if (unlikely(!lut)) {
			err = -ENOMEM;
			goto err_obj;
		}

		err = radix_tree_insert(handles_vma, handle, vma);
		if (unlikely(err)) {
			kmem_cache_free(eb->i915->luts, lut);
			goto err_obj;
		}

		/* transfer ref to ctx */
		if (!vma->open_count++)
			i915_vma_reopen(vma);
		list_add(&lut->obj_link, &obj->lut_list);
		list_add(&lut->ctx_link, &eb->ctx->handles_list);
		lut->ctx = eb->ctx;
		lut->handle = handle;

add_vma:
		err = eb_add_vma(eb, i, batch, vma);
		if (unlikely(err))
			goto err_vma;

		GEM_BUG_ON(vma != eb->vma[i]);
		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
		GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
			   eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
	}

	eb->args->flags |= __EXEC_VALIDATED;
	return eb_reserve(eb);

err_obj:
	i915_gem_object_put(obj);
err_vma:
	eb->vma[i] = NULL;
	return err;
}

static struct i915_vma *
eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
{
	if (eb->lut_size < 0) {
		if (handle >= -eb->lut_size)
			return NULL;
		return eb->vma[handle];
	} else {
		struct hlist_head *head;
		struct i915_vma *vma;

		head = &eb->buckets[hash_32(handle, eb->lut_size)];
		hlist_for_each_entry(vma, head, exec_node) {
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void eb_release_vmas(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;

	for (i = 0; i < count; i++) {
		struct i915_vma *vma = eb->vma[i];
		unsigned int flags = eb->flags[i];

		if (!vma)
			break;

		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
		vma->exec_flags = NULL;
		eb->vma[i] = NULL;

		if (flags & __EXEC_OBJECT_HAS_PIN)
			__eb_unreserve_vma(vma, flags);

		if (flags & __EXEC_OBJECT_HAS_REF)
			i915_vma_put(vma);
	}
}

static void eb_reset_vmas(const struct i915_execbuffer *eb)
{
	eb_release_vmas(eb);
	if (eb->lut_size > 0)
		memset(eb->buckets, 0,
		       sizeof(struct hlist_head) << eb->lut_size);
}

static void eb_destroy(const struct i915_execbuffer *eb)
{
	GEM_BUG_ON(eb->reloc_cache.rq);

	if (eb->lut_size > 0)
		kfree(eb->buckets);
}

static inline u64
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
		  const struct i915_vma *target)
{
	return gen8_canonical_addr((int)reloc->delta + target->node.start);
}

static void reloc_cache_init(struct reloc_cache *cache,
			     struct drm_i915_private *i915)
{
	cache->page = -1;
	cache->vaddr = 0;
	/* Must be a variable in the struct to allow GCC to unroll. */
	cache->gen = INTEL_GEN(i915);
	cache->has_llc = HAS_LLC(i915);
	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
	cache->has_fence = cache->gen < 4;
	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
	cache->node.allocated = false;
	cache->rq = NULL;
	cache->rq_size = 0;
}

static inline void *unmask_page(unsigned long p)
{
	return (void *)(uintptr_t)(p & PAGE_MASK);
}

static inline unsigned int unmask_flags(unsigned long p)
{
	return p & ~PAGE_MASK;
}

#define KMAP 0x4 /* after CLFLUSH_FLAGS */

static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
{
	struct drm_i915_private *i915 =
		container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
	return &i915->ggtt;
}

static void reloc_gpu_flush(struct reloc_cache *cache)
{
	GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
	cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(cache->rq->batch->obj);
	i915_gem_chipset_flush(cache->rq->i915);

	i915_request_add(cache->rq);
	cache->rq = NULL;
}

static void reloc_cache_reset(struct reloc_cache *cache)
{
	void *vaddr;

	if (cache->rq)
		reloc_gpu_flush(cache);

	if (!cache->vaddr)
		return;

	vaddr = unmask_page(cache->vaddr);
	if (cache->vaddr & KMAP) {
		if (cache->vaddr & CLFLUSH_AFTER)
			mb();

		kunmap_atomic(vaddr);
		i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
	} else {
		wmb();
		io_mapping_unmap_atomic((void __iomem *)vaddr);
		if (cache->node.allocated) {
			struct i915_ggtt *ggtt = cache_to_ggtt(cache);

			ggtt->vm.clear_range(&ggtt->vm,
					     cache->node.start,
					     cache->node.size);
			drm_mm_remove_node(&cache->node);
		} else {
			i915_vma_unpin((struct i915_vma *)cache->node.mm);
		}
	}

	cache->vaddr = 0;
	cache->page = -1;
}

static void *reloc_kmap(struct drm_i915_gem_object *obj,
			struct reloc_cache *cache,
			unsigned long page)
{
	void *vaddr;

	if (cache->vaddr) {
		kunmap_atomic(unmask_page(cache->vaddr));
	} else {
		unsigned int flushes;
		int err;

		err = i915_gem_obj_prepare_shmem_write(obj, &flushes);
		if (err)
			return ERR_PTR(err);

		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);

		cache->vaddr = flushes | KMAP;
		cache->node.mm = (void *)obj;
		if (flushes)
			mb();
	}

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
	cache->page = page;

	return vaddr;
}

static void *reloc_iomap(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 unsigned long page)
{
	struct i915_ggtt *ggtt = cache_to_ggtt(cache);
	unsigned long offset;
	void *vaddr;

	if (cache->vaddr) {
		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
	} else {
		struct i915_vma *vma;
		int err;

		if (use_cpu_reloc(cache, obj))
			return NULL;

		err = i915_gem_object_set_to_gtt_domain(obj, true);
		if (err)
			return ERR_PTR(err);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE |
					       PIN_NONBLOCK |
					       PIN_NONFAULT);
		if (IS_ERR(vma)) {
			memset(&cache->node, 0, sizeof(cache->node));
			err = drm_mm_insert_node_in_range
				(&ggtt->vm.mm, &cache->node,
				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
				 0, ggtt->mappable_end,
				 DRM_MM_INSERT_LOW);
			if (err) /* no inactive aperture space, use cpu reloc */
				return NULL;
		} else {
			err = i915_vma_put_fence(vma);
			if (err) {
				i915_vma_unpin(vma);
				return ERR_PTR(err);
			}

			cache->node.start = vma->node.start;
			cache->node.mm = (void *)vma;
		}
	}

	offset = cache->node.start;
	if (cache->node.allocated) {
		wmb();
		ggtt->vm.insert_page(&ggtt->vm,
				     i915_gem_object_get_dma_address(obj, page),
				     offset, I915_CACHE_NONE, 0);
	} else {
		offset += page << PAGE_SHIFT;
	}

	vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
							 offset);
	cache->page = page;
	cache->vaddr = (unsigned long)vaddr;

	return vaddr;
}

static void *reloc_vaddr(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 unsigned long page)
{
	void *vaddr;

	if (cache->page == page) {
		vaddr = unmask_page(cache->vaddr);
	} else {
		vaddr = NULL;
		if ((cache->vaddr & KMAP) == 0)
			vaddr = reloc_iomap(obj, cache, page);
		if (!vaddr)
			vaddr = reloc_kmap(obj, cache, page);
	}

	return vaddr;
}

static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
{
	if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
		if (flushes & CLFLUSH_BEFORE) {
			clflushopt(addr);
			mb();
		}

		*addr = value;

		/*
		 * Writes to the same cacheline are serialised by the CPU
		 * (including clflush). On the write path, we only require
		 * that it hits memory in an orderly fashion and place
		 * mb barriers at the start and end of the relocation phase
		 * to ensure ordering of clflush wrt to the system.
		 */
		if (flushes & CLFLUSH_AFTER)
			clflushopt(addr);
	} else
		*addr = value;
}

static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
			     struct i915_vma *vma,
			     unsigned int len)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	struct drm_i915_gem_object *obj;
	struct i915_request *rq;
	struct i915_vma *batch;
	u32 *cmd;
	int err;

	if (DBG_FORCE_RELOC == FORCE_GPU_RELOC) {
		obj = vma->obj;
		if (obj->cache_dirty & ~obj->cache_coherent)
			i915_gem_clflush_object(obj, 0);
		obj->write_domain = 0;
	}

	GEM_BUG_ON(vma->obj->write_domain & I915_GEM_DOMAIN_CPU);

	obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map(obj,
				      cache->has_llc ?
				      I915_MAP_FORCE_WB :
				      I915_MAP_FORCE_WC);
	i915_gem_object_unpin_pages(obj);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	err = i915_gem_object_set_to_wc_domain(obj, false);
	if (err)
		goto err_unmap;

	batch = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_unmap;
	}

	err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
	if (err)
		goto err_unmap;

	rq = i915_request_alloc(eb->engine, eb->ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = i915_request_await_object(rq, vma->obj, true);
	if (err)
		goto err_request;

	err = eb->engine->emit_bb_start(rq,
					batch->node.start, PAGE_SIZE,
					cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
	if (err)
		goto err_request;

	GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
	err = i915_vma_move_to_active(batch, rq, 0);
	if (err)
		goto skip_request;

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		goto skip_request;

	rq->batch = batch;
	i915_vma_unpin(batch);

	cache->rq = rq;
	cache->rq_cmd = cmd;
	cache->rq_size = 0;

	/* Return with batch mapping (cmd) still pinned */
	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(batch);
err_unmap:
	i915_gem_object_unpin_map(obj);
	return err;
}

static u32 *reloc_gpu(struct i915_execbuffer *eb,
		      struct i915_vma *vma,
		      unsigned int len)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	u32 *cmd;

	if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1))
		reloc_gpu_flush(cache);

	if (unlikely(!cache->rq)) {
		int err;

		/* If we need to copy for the cmdparser, we will stall anyway */
		if (eb_use_cmdparser(eb))
			return ERR_PTR(-EWOULDBLOCK);

		if (!intel_engine_can_store_dword(eb->engine))
			return ERR_PTR(-ENODEV);

		err = __reloc_gpu_alloc(eb, vma, len);
		if (unlikely(err))
			return ERR_PTR(err);
	}

	cmd = cache->rq_cmd + cache->rq_size;
	cache->rq_size += len;

	return cmd;
}

static u64
relocate_entry(struct i915_vma *vma,
	       const struct drm_i915_gem_relocation_entry *reloc,
	       struct i915_execbuffer *eb,
	       const struct i915_vma *target)
{
	u64 offset = reloc->offset;
	u64 target_offset = relocation_target(reloc, target);
	bool wide = eb->reloc_cache.use_64bit_reloc;
	void *vaddr;

	if (!eb->reloc_cache.vaddr &&
	    (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
	     !reservation_object_test_signaled_rcu(vma->resv, true))) {
		const unsigned int gen = eb->reloc_cache.gen;
		unsigned int len;
		u32 *batch;
		u64 addr;

		if (wide)
			len = offset & 7 ? 8 : 5;
		else if (gen >= 4)
			len = 4;
		else
			len = 3;

		batch = reloc_gpu(eb, vma, len);
		if (IS_ERR(batch))
			goto repeat;

		addr = gen8_canonical_addr(vma->node.start + offset);
		if (wide) {
			if (offset & 7) {
				*batch++ = MI_STORE_DWORD_IMM_GEN4;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = lower_32_bits(target_offset);

				addr = gen8_canonical_addr(addr + 4);

				*batch++ = MI_STORE_DWORD_IMM_GEN4;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = upper_32_bits(target_offset);
			} else {
				*batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = lower_32_bits(target_offset);
				*batch++ = upper_32_bits(target_offset);
			}
		} else if (gen >= 6) {
			*batch++ = MI_STORE_DWORD_IMM_GEN4;
			*batch++ = 0;
			*batch++ = addr;
			*batch++ = target_offset;
		} else if (gen >= 4) {
			*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
			*batch++ = 0;
			*batch++ = addr;
			*batch++ = target_offset;
		} else {
			*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*batch++ = addr;
			*batch++ = target_offset;
		}

		goto out;
	}

repeat:
	vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	clflush_write32(vaddr + offset_in_page(offset),
			lower_32_bits(target_offset),
			eb->reloc_cache.vaddr);

	if (wide) {
		offset += sizeof(u32);
		target_offset >>= 32;
		wide = false;
		goto repeat;
	}

out:
	return target->node.start | UPDATE;
}

static u64
eb_relocate_entry(struct i915_execbuffer *eb,
		  struct i915_vma *vma,
		  const struct drm_i915_gem_relocation_entry *reloc)
{
	struct i915_vma *target;
	int err;

	/* we already hold a reference to all valid objects */
	target = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(!target))
		return -ENOENT;

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	if (reloc->write_domain) {
		*target->exec_flags |= EXEC_OBJECT_WRITE;

		/*
		 * Sandybridge PPGTT errata: We need a global gtt mapping
		 * for MI and pipe_control writes because the gpu doesn't
		 * properly redirect them through the ppgtt for non_secure
		 * batchbuffers.
		 */
		if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
		    IS_GEN(eb->i915, 6)) {
			err = i915_vma_bind(target, target->obj->cache_level,
					    PIN_GLOBAL);
			if (WARN_ONCE(err,
				      "Unexpected failure to bind target VMA!"))
				return err;
		}
	}

	/*
	 * If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (!DBG_FORCE_RELOC &&
	    gen8_canonical_addr(target->node.start) == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		     vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "target %d offset %d size %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset,
			  (int)vma->size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "target %d offset %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset);
		return -EINVAL;
	}

	/*
	 * If we write into the object, we need to force the synchronisation
	 * barrier, either with an asynchronous clflush or if we executed the
	 * patching using the GPU (though that should be serialised by the
	 * timeline). To be completely sure, and since we are required to
	 * do relocations we are already stalling, disable the user's opt
	 * out of our synchronisation.
	 */
	*vma->exec_flags &= ~EXEC_OBJECT_ASYNC;

	/* and update the user's relocation entry */
	return relocate_entry(vma, reloc, eb, target);
}

static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *urelocs;
	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
	unsigned int remain;

	urelocs = u64_to_user_ptr(entry->relocs_ptr);
	remain = entry->relocation_count;
	if (unlikely(remain > N_RELOC(ULONG_MAX)))
		return -EINVAL;

	/*
	 * We must check that the entire relocation array is safe
	 * to read. However, if the array is not writable the user loses
	 * the updated relocation values.
	 */
	if (unlikely(!access_ok(urelocs, remain*sizeof(*urelocs))))
		return -EFAULT;

	do {
		struct drm_i915_gem_relocation_entry *r = stack;
		unsigned int count =
			min_t(unsigned int, remain, ARRAY_SIZE(stack));
		unsigned int copied;

		/*
		 * This is the fast path and we cannot handle a pagefault
		 * whilst holding the struct mutex lest the user pass in the
		 * relocations contained within a mmaped bo. In such a case
		 * the page fault handler would call i915_gem_fault() and
		 * we would try to acquire the struct mutex again. Obviously
		 * this is bad and so lockdep complains vehemently.
		 */
		pagefault_disable();
		copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
		pagefault_enable();
		if (unlikely(copied)) {
			remain = -EFAULT;
			goto out;
		}

		remain -= count;
		do {
			u64 offset = eb_relocate_entry(eb, vma, r);

			if (likely(offset == 0)) {
			} else if ((s64)offset < 0) {
				remain = (int)offset;
				goto out;
			} else {
				/*
				 * Note that reporting an error now
				 * leaves everything in an inconsistent
				 * state as we have *already* changed
				 * the relocation value inside the
				 * object. As we have not changed the
				 * reloc.presumed_offset or will not
				 * change the execobject.offset, on the
				 * call we may not rewrite the value
				 * inside the object, leaving it
				 * dangling and causing a GPU hang. Unless
				 * userspace dynamically rebuilds the
				 * relocations on each execbuf rather than
				 * presume a static tree.
				 *
				 * We did previously check if the relocations
				 * were writable (access_ok), an error now
				 * would be a strange race with mprotect,
				 * having already demonstrated that we
				 * can read from this userspace address.
				 */
				offset = gen8_canonical_addr(offset & ~UPDATE);
				if (unlikely(__put_user(offset, &urelocs[r-stack].presumed_offset))) {
					remain = -EFAULT;
					goto out;
				}
			}
		} while (r++, --count);
		urelocs += ARRAY_SIZE(stack);
	} while (remain);
out:
	reloc_cache_reset(&eb->reloc_cache);
	return remain;
}

static int
eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma)
{
	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
	struct drm_i915_gem_relocation_entry *relocs =
		u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
	unsigned int i;
	int err;

	for (i = 0; i < entry->relocation_count; i++) {
		u64 offset = eb_relocate_entry(eb, vma, &relocs[i]);

		if ((s64)offset < 0) {
			err = (int)offset;
			goto err;
		}
	}
	err = 0;
err:
	reloc_cache_reset(&eb->reloc_cache);
	return err;
}

static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
{
	const char __user *addr, *end;
	unsigned long size;
	char __maybe_unused c;

	size = entry->relocation_count;
	if (size == 0)
		return 0;

	if (size > N_RELOC(ULONG_MAX))
		return -EINVAL;

	addr = u64_to_user_ptr(entry->relocs_ptr);
	size *= sizeof(struct drm_i915_gem_relocation_entry);
	if (!access_ok(addr, size))
		return -EFAULT;

	end = addr + size;
	for (; addr < end; addr += PAGE_SIZE) {
		int err = __get_user(c, addr);
		if (err)
			return err;
	}
	return __get_user(c, end - 1);
}

static int eb_copy_relocations(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		const unsigned int nreloc = eb->exec[i].relocation_count;
		struct drm_i915_gem_relocation_entry __user *urelocs;
		struct drm_i915_gem_relocation_entry *relocs;
		unsigned long size;
		unsigned long copied;

		if (nreloc == 0)
			continue;

		err = check_relocations(&eb->exec[i]);
		if (err)
			goto err;

		urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
		size = nreloc * sizeof(*relocs);

		relocs = kvmalloc_array(size, 1, GFP_KERNEL);
		if (!relocs) {
			err = -ENOMEM;
			goto err;
		}

		/* copy_from_user is limited to < 4GiB */
		copied = 0;
		do {
			unsigned int len =
				min_t(u64, BIT_ULL(31), size - copied);

			if (__copy_from_user((char *)relocs + copied,
					     (char __user *)urelocs + copied,
					     len)) {
end_user:
				user_access_end();
				kvfree(relocs);
				err = -EFAULT;
				goto err;
			}

			copied += len;
		} while (copied < size);

		/*
		 * As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		if (!user_access_begin(urelocs, size))
			goto end_user;

		for (copied = 0; copied < nreloc; copied++)
			unsafe_put_user(-1,
					&urelocs[copied].presumed_offset,
					end_user);
		user_access_end();

		eb->exec[i].relocs_ptr = (uintptr_t)relocs;
	}

	return 0;

err:
	while (i--) {
		struct drm_i915_gem_relocation_entry *relocs =
			u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
		if (eb->exec[i].relocation_count)
			kvfree(relocs);
	}
	return err;
}

static int eb_prefault_relocations(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;

	if (unlikely(i915_modparams.prefault_disable))
		return 0;

	for (i = 0; i < count; i++) {
		int err;

		err = check_relocations(&eb->exec[i]);
		if (err)
			return err;
	}

	return 0;
}

static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
{
	struct drm_device *dev = &eb->i915->drm;
	bool have_copy = false;
	struct i915_vma *vma;
	int err = 0;

repeat:
	if (signal_pending(current)) {
		err = -ERESTARTSYS;
		goto out;
	}

	/* We may process another execbuffer during the unlock... */
	eb_reset_vmas(eb);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * We take 3 passes through the slowpath.
	 *
	 * 1 - we try to just prefault all the user relocation entries and
	 * then attempt to reuse the atomic pagefault disabled fast path again.
	 *
	 * 2 - we copy the user entries to a local buffer here outside of the
	 * lock and allow ourselves to wait upon any rendering before
	 * relocations
	 *
	 * 3 - we already have a local copy of the relocation entries, but
	 * were interrupted (EAGAIN) whilst waiting for the objects, try again.
	 */
	if (!err) {
		err = eb_prefault_relocations(eb);
	} else if (!have_copy) {
		err = eb_copy_relocations(eb);
		have_copy = err == 0;
	} else {
		cond_resched();
		err = 0;
	}
	if (err) {
		mutex_lock(&dev->struct_mutex);
		goto out;
	}

	/* A frequent cause for EAGAIN is currently unavailable client pages */
	flush_workqueue(eb->i915->mm.userptr_wq);

	err = i915_mutex_lock_interruptible(dev);
	if (err) {
		mutex_lock(&dev->struct_mutex);
		goto out;
	}

	/* reacquire the objects */
	err = eb_lookup_vmas(eb);
	if (err)
		goto err;

	GEM_BUG_ON(!eb->batch);

1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740
	list_for_each_entry(vma, &eb->relocs, reloc_link) {
		if (!have_copy) {
			pagefault_disable();
			err = eb_relocate_vma(eb, vma);
			pagefault_enable();
			if (err)
				goto repeat;
		} else {
			err = eb_relocate_vma_slow(eb, vma);
			if (err)
				goto err;
		}
	}

	/*
	 * Leave the user relocations as they are, this is the painfully slow
	 * path, and we want to avoid the complication of dropping the lock
	 * whilst having buffers reserved in the aperture and so causing
	 * spurious ENOSPC for random operations.
	 */

err:
	if (err == -EAGAIN)
		goto repeat;

out:
	if (have_copy) {
		const unsigned int count = eb->buffer_count;
		unsigned int i;

		for (i = 0; i < count; i++) {
			const struct drm_i915_gem_exec_object2 *entry =
				&eb->exec[i];
			struct drm_i915_gem_relocation_entry *relocs;

			if (!entry->relocation_count)
				continue;

			relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
			kvfree(relocs);
		}
	}

	return err;
}

static int eb_relocate(struct i915_execbuffer *eb)
{
	if (eb_lookup_vmas(eb))
		goto slow;

	/* The objects are in their final locations, apply the relocations. */
	if (eb->args->flags & __EXEC_HAS_RELOC) {
		struct i915_vma *vma;

		list_for_each_entry(vma, &eb->relocs, reloc_link) {
			if (eb_relocate_vma(eb, vma))
				goto slow;
		}
	}

	return 0;

slow:
	return eb_relocate_slow(eb);
}

static int eb_move_to_gpu(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		unsigned int flags = eb->flags[i];
		struct i915_vma *vma = eb->vma[i];
		struct drm_i915_gem_object *obj = vma->obj;

		if (flags & EXEC_OBJECT_CAPTURE) {
			struct i915_capture_list *capture;

			capture = kmalloc(sizeof(*capture), GFP_KERNEL);
			if (unlikely(!capture))
				return -ENOMEM;

			capture->next = eb->request->capture_list;
			capture->vma = eb->vma[i];
			eb->request->capture_list = capture;
		}

		/*
		 * If the GPU is not _reading_ through the CPU cache, we need
		 * to make sure that any writes (both previous GPU writes from
		 * before a change in snooping levels and normal CPU writes)
		 * caught in that cache are flushed to main memory.
		 *
		 * We want to say
		 *   obj->cache_dirty &&
		 *   !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		 * but gcc's optimiser doesn't handle that as well and emits
		 * two jumps instead of one. Maybe one day...
		 */
		if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) {
			if (i915_gem_clflush_object(obj, 0))
				flags &= ~EXEC_OBJECT_ASYNC;
		}

		if (flags & EXEC_OBJECT_ASYNC)
			continue;

		err = i915_request_await_object
			(eb->request, obj, flags & EXEC_OBJECT_WRITE);
		if (err)
			return err;
	}

	for (i = 0; i < count; i++) {
		unsigned int flags = eb->flags[i];
		struct i915_vma *vma = eb->vma[i];

		err = i915_vma_move_to_active(vma, eb->request, flags);
		if (unlikely(err)) {
			i915_request_skip(eb->request, err);
			return err;
		}
		__eb_unreserve_vma(vma, flags);
		vma->exec_flags = NULL;

		if (unlikely(flags & __EXEC_OBJECT_HAS_REF))
			i915_vma_put(vma);
	}
	eb->exec = NULL;

	/* Unconditionally flush any chipset caches (for streaming writes). */
	i915_gem_chipset_flush(eb->i915);

	return 0;
}

static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
		return false;

	/* Kernel clipping was a DRI1 misfeature */
	if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) {
		if (exec->num_cliprects || exec->cliprects_ptr)
			return false;
	}
	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return false;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return false;

	return true;
}

static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
{
	u32 *cs;
	int i;

	if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	cs = intel_ring_begin(rq, 4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(4);
	for (i = 0; i < 4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
		*cs++ = 0;
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}
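
/*
 * Editor's illustration (not part of the original driver source): the reset
 * above emits a fixed ten-dword sequence, which is why intel_ring_begin()
 * asks for 4 * 2 + 2 dwords:
 *
 *   MI_LOAD_REGISTER_IMM(4)
 *   { GEN7_SO_WRITE_OFFSET(0), 0 }
 *   { GEN7_SO_WRITE_OFFSET(1), 0 }
 *   { GEN7_SO_WRITE_OFFSET(2), 0 }
 *   { GEN7_SO_WRITE_OFFSET(3), 0 }
 *   MI_NOOP
 */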

static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
{
	struct drm_i915_gem_object *shadow_batch_obj;
	struct i915_vma *vma;
	int err;

	shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
						   PAGE_ALIGN(eb->batch_len));
	if (IS_ERR(shadow_batch_obj))
		return ERR_CAST(shadow_batch_obj);

	err = intel_engine_cmd_parser(eb->engine,
				      eb->batch->obj,
				      shadow_batch_obj,
				      eb->batch_start_offset,
				      eb->batch_len,
				      is_master);
	if (err) {
		if (err == -EACCES) /* unhandled chained batch */
			vma = NULL;
		else
			vma = ERR_PTR(err);
		goto out;
	}

	vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		goto out;

	eb->vma[eb->buffer_count] = i915_vma_get(vma);
	eb->flags[eb->buffer_count] =
		__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
	vma->exec_flags = &eb->flags[eb->buffer_count];
	eb->buffer_count++;

out:
	i915_gem_object_unpin_pages(shadow_batch_obj);
	return vma;
}

static void
add_to_client(struct i915_request *rq, struct drm_file *file)
{
	rq->file_priv = file->driver_priv;
	list_add_tail(&rq->client_link, &rq->file_priv->mm.request_list);
}

static int eb_submit(struct i915_execbuffer *eb)
{
	int err;

	err = eb_move_to_gpu(eb);
	if (err)
		return err;

	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
		err = i915_reset_gen7_sol_offsets(eb->request);
		if (err)
			return err;
	}

	err = eb->engine->emit_bb_start(eb->request,
					eb->batch->node.start +
					eb->batch_start_offset,
					eb->batch_len,
					eb->batch_flags);
	if (err)
		return err;

	return 0;
}

/*
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The engine index is returned.
 */
static unsigned int
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
			 struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_engine < 0)
		file_priv->bsd_engine = atomic_fetch_xor(1,
			 &dev_priv->mm.bsd_engine_dispatch_index);

	return file_priv->bsd_engine;
}
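
/*
 * Editor's illustration (assumed usage, not from this file): with
 * I915_EXEC_BSD_DEFAULT the first execbuf from a drm_file toggles the shared
 * bsd_engine_dispatch_index and caches the result, so the choice sticks for
 * that file:
 *
 *   file A: atomic_fetch_xor(1, &index) returns 0 -> VCS0 from then on
 *   file B: atomic_fetch_xor(1, &index) returns 1 -> VCS1 from then on
 */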

#define I915_USER_RINGS (4)

static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
	[I915_EXEC_DEFAULT]	= RCS,
	[I915_EXEC_RENDER]	= RCS,
	[I915_EXEC_BLT]		= BCS,
	[I915_EXEC_BSD]		= VCS,
	[I915_EXEC_VEBOX]	= VECS
};

static struct intel_engine_cs *
eb_select_engine(struct drm_i915_private *dev_priv,
		 struct drm_file *file,
		 struct drm_i915_gem_execbuffer2 *args)
{
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
	struct intel_engine_cs *engine;

	if (user_ring_id > I915_USER_RINGS) {
		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
		return NULL;
	}

	if ((user_ring_id != I915_EXEC_BSD) &&
	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
		DRM_DEBUG("execbuf with non bsd ring but with invalid "
			  "bsd dispatch flags: %d\n", (int)(args->flags));
		return NULL;
	}

	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
			bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
			bsd_idx--;
		} else {
			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
				  bsd_idx);
			return NULL;
		}

		engine = dev_priv->engine[_VCS(bsd_idx)];
	} else {
		engine = dev_priv->engine[user_ring_map[user_ring_id]];
	}

	if (!engine) {
		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
		return NULL;
	}

	return engine;
}
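
/*
 * Editor's sketch (hypothetical userspace values, not uapi documentation):
 * both the ring and an explicit BSD engine are encoded in the execbuffer2
 * flags, e.g.
 *
 *   args.flags = I915_EXEC_BSD | I915_EXEC_BSD_RING2;
 *
 * which the code above turns into dev_priv->engine[_VCS(1)], while a plain
 * I915_EXEC_BSD leaves the choice to gen8_dispatch_bsd_engine().
 */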

static void
__free_fence_array(struct drm_syncobj **fences, unsigned int n)
{
	while (n--)
		drm_syncobj_put(ptr_mask_bits(fences[n], 2));
	kvfree(fences);
}

static struct drm_syncobj **
get_fence_array(struct drm_i915_gem_execbuffer2 *args,
		struct drm_file *file)
{
	const unsigned long nfences = args->num_cliprects;
	struct drm_i915_gem_exec_fence __user *user;
	struct drm_syncobj **fences;
	unsigned long n;
	int err;

	if (!(args->flags & I915_EXEC_FENCE_ARRAY))
		return NULL;

	/* Check multiplication overflow for access_ok() and kvmalloc_array() */
	BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
	if (nfences > min_t(unsigned long,
			    ULONG_MAX / sizeof(*user),
			    SIZE_MAX / sizeof(*fences)))
		return ERR_PTR(-EINVAL);

	user = u64_to_user_ptr(args->cliprects_ptr);
	if (!access_ok(user, nfences * sizeof(*user)))
		return ERR_PTR(-EFAULT);

	fences = kvmalloc_array(nfences, sizeof(*fences),
				__GFP_NOWARN | GFP_KERNEL);
	if (!fences)
		return ERR_PTR(-ENOMEM);

	for (n = 0; n < nfences; n++) {
		struct drm_i915_gem_exec_fence fence;
		struct drm_syncobj *syncobj;

		if (__copy_from_user(&fence, user++, sizeof(fence))) {
			err = -EFAULT;
			goto err;
		}

		if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
			err = -EINVAL;
			goto err;
		}

		syncobj = drm_syncobj_find(file, fence.handle);
		if (!syncobj) {
			DRM_DEBUG("Invalid syncobj handle provided\n");
			err = -ENOENT;
			goto err;
		}

		BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
			     ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);

		fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
	}

	return fences;

err:
	__free_fence_array(fences, n);
	return ERR_PTR(err);
}

static void
put_fence_array(struct drm_i915_gem_execbuffer2 *args,
		struct drm_syncobj **fences)
{
	if (fences)
		__free_fence_array(fences, args->num_cliprects);
}

static int
await_fence_array(struct i915_execbuffer *eb,
		  struct drm_syncobj **fences)
{
	const unsigned int nfences = eb->args->num_cliprects;
	unsigned int n;
	int err;

	for (n = 0; n < nfences; n++) {
		struct drm_syncobj *syncobj;
		struct dma_fence *fence;
		unsigned int flags;

		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
		if (!(flags & I915_EXEC_FENCE_WAIT))
			continue;

		fence = drm_syncobj_fence_get(syncobj);
		if (!fence)
			return -EINVAL;

		err = i915_request_await_dma_fence(eb->request, fence);
		dma_fence_put(fence);
		if (err < 0)
			return err;
	}

	return 0;
}

static void
signal_fence_array(struct i915_execbuffer *eb,
		   struct drm_syncobj **fences)
{
	const unsigned int nfences = eb->args->num_cliprects;
	struct dma_fence * const fence = &eb->request->fence;
	unsigned int n;

	for (n = 0; n < nfences; n++) {
		struct drm_syncobj *syncobj;
		unsigned int flags;

		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
		if (!(flags & I915_EXEC_FENCE_SIGNAL))
			continue;

		drm_syncobj_replace_fence(syncobj, fence);
	}
}
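
/*
 * Editor's sketch of the userspace side assumed by the helpers above (the
 * variable names are hypothetical): with I915_EXEC_FENCE_ARRAY set,
 * cliprects_ptr/num_cliprects are reused to pass drm_i915_gem_exec_fence
 * entries, e.g.
 *
 *   struct drm_i915_gem_exec_fence fences[2] = {
 *           { .handle = wait_handle,   .flags = I915_EXEC_FENCE_WAIT },
 *           { .handle = signal_handle, .flags = I915_EXEC_FENCE_SIGNAL },
 *   };
 *   args.cliprects_ptr = (uintptr_t)fences;
 *   args.num_cliprects = 2;
 *   args.flags |= I915_EXEC_FENCE_ARRAY;
 *
 * get_fence_array() resolves the handles, await_fence_array() makes the
 * request wait on the WAIT entries, and signal_fence_array() installs the
 * request fence into the SIGNAL entries.
 */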

static int
i915_gem_do_execbuffer(struct drm_device *dev,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec,
		       struct drm_syncobj **fences)
{
	struct i915_execbuffer eb;
	struct dma_fence *in_fence = NULL;
	struct sync_file *out_fence = NULL;
	int out_fence_fd = -1;
	int err;

	BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS);
	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);

	eb.i915 = to_i915(dev);
	eb.file = file;
	eb.args = args;
	if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
		args->flags |= __EXEC_HAS_RELOC;

	eb.exec = exec;
	eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1);
	eb.vma[0] = NULL;
	eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1);

	eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	reloc_cache_init(&eb.reloc_cache, eb.i915);

	eb.buffer_count = args->buffer_count;
	eb.batch_start_offset = args->batch_start_offset;
	eb.batch_len = args->batch_len;

	eb.batch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		eb.batch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		eb.batch_flags |= I915_DISPATCH_PINNED;

	eb.engine = eb_select_engine(eb.i915, file, args);
	if (!eb.engine)
		return -EINVAL;

	if (args->flags & I915_EXEC_FENCE_IN) {
		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!in_fence)
			return -EINVAL;
	}

	if (args->flags & I915_EXEC_FENCE_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			err = out_fence_fd;
			goto err_in_fence;
		}
	}

	err = eb_create(&eb);
	if (err)
		goto err_out_fence;

	GEM_BUG_ON(!eb.lut_size);

	err = eb_select_context(&eb);
	if (unlikely(err))
		goto err_destroy;

	/*
	 * Take a local wakeref for preparing to dispatch the execbuf as
	 * we expect to access the hardware fairly frequently in the
	 * process. Upon first dispatch, we acquire another prolonged
	 * wakeref that we hold until the GPU has been idle for at least
	 * 100ms.
	 */
	intel_runtime_pm_get(eb.i915);

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto err_rpm;

	err = eb_relocate(&eb);
	if (err) {
		/*
		 * If the user expects the execobject.offset and
		 * reloc.presumed_offset to be an exact match,
		 * as for using NO_RELOC, then we cannot update
		 * the execobject.offset until we have completed
		 * relocation.
		 */
		args->flags &= ~__EXEC_HAS_RELOC;
		goto err_vma;
	}

	if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		err = -EINVAL;
		goto err_vma;
	}
	if (eb.batch_start_offset > eb.batch->size ||
	    eb.batch_len > eb.batch->size - eb.batch_start_offset) {
		DRM_DEBUG("Attempting to use out-of-bounds batch\n");
		err = -EINVAL;
		goto err_vma;
	}

	if (eb_use_cmdparser(&eb)) {
		struct i915_vma *vma;

		vma = eb_parse(&eb, drm_is_current_master(file));
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_vma;
		}

		if (vma) {
			/*
			 * Batch parsed and accepted:
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
			 * bit from MI_BATCH_BUFFER_START commands issued in
			 * the dispatch_execbuffer implementations. We
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
			eb.batch_flags |= I915_DISPATCH_SECURE;
			eb.batch_start_offset = 0;
			eb.batch = vma;
		}
	}

	if (eb.batch_len == 0)
		eb.batch_len = eb.batch->size - eb.batch_start_offset;

	/*
	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again.
	 */
	if (eb.batch_flags & I915_DISPATCH_SECURE) {
		struct i915_vma *vma;

		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_vma;
		}

		eb.batch = vma;
	}

	/* All GPU relocation batches must be submitted prior to the user rq */
	GEM_BUG_ON(eb.reloc_cache.rq);

	/* Allocate a request for this batch buffer nice and early. */
	eb.request = i915_request_alloc(eb.engine, eb.ctx);
	if (IS_ERR(eb.request)) {
		err = PTR_ERR(eb.request);
		goto err_batch_unpin;
	}

	if (in_fence) {
		err = i915_request_await_dma_fence(eb.request, in_fence);
		if (err < 0)
			goto err_request;
	}

	if (fences) {
		err = await_fence_array(&eb, fences);
		if (err)
			goto err_request;
	}

	if (out_fence_fd != -1) {
		out_fence = sync_file_create(&eb.request->fence);
		if (!out_fence) {
			err = -ENOMEM;
			goto err_request;
		}
	}

	/*
	 * Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	eb.request->batch = eb.batch;

	trace_i915_request_queue(eb.request, eb.batch_flags);
	err = eb_submit(&eb);
err_request:
	i915_request_add(eb.request);
	add_to_client(eb.request, file);

	if (fences)
		signal_fence_array(&eb, fences);

	if (out_fence) {
		if (err == 0) {
			fd_install(out_fence_fd, out_fence->file);
			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
			args->rsvd2 |= (u64)out_fence_fd << 32;
			out_fence_fd = -1;
		} else {
			fput(out_fence->file);
		}
	}

err_batch_unpin:
	if (eb.batch_flags & I915_DISPATCH_SECURE)
		i915_vma_unpin(eb.batch);
err_vma:
	if (eb.exec)
		eb_release_vmas(&eb);
	mutex_unlock(&dev->struct_mutex);
err_rpm:
	intel_runtime_pm_put_unchecked(eb.i915);
	i915_gem_context_put(eb.ctx);
err_destroy:
	eb_destroy(&eb);
err_out_fence:
	if (out_fence_fd != -1)
		put_unused_fd(out_fence_fd);
err_in_fence:
	dma_fence_put(in_fence);
	return err;
}

static size_t eb_element_size(void)
{
	return (sizeof(struct drm_i915_gem_exec_object2) +
		sizeof(struct i915_vma *) +
		sizeof(unsigned int));
}

static bool check_buffer_count(size_t count)
{
	const size_t sz = eb_element_size();

	/*
	 * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup
	 * array size (see eb_create()). Otherwise, we can accept an array as
	 * large as can be addressed (though use large arrays at your peril)!
	 */

	return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1);
}
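
/*
 * Editor's note: eb_element_size() sizes one combined allocation that
 * i915_gem_do_execbuffer() later carves into three arrays (plus one spare
 * slot for the cmdparser's shadow batch), roughly:
 *
 *   exec2_list = kvmalloc_array(count + 1, eb_element_size(), ...);
 *
 *   [ exec_object2 x (count+1) | i915_vma * x (count+1) | unsigned int x (count+1) ]
 *     eb.exec = exec2_list       eb.vma = exec + count + 1   eb.flags = vma + count + 1
 */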

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	const size_t count = args->buffer_count;
	unsigned int i;
	int err;

	if (!check_buffer_count(count)) {
		DRM_DEBUG("execbuf2 with %zd buffers\n", count);
		return -EINVAL;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	if (!i915_gem_check_execbuffer(&exec2))
		return -EINVAL;

	/* Copy in the exec list from userland */
	exec_list = kvmalloc_array(count, sizeof(*exec_list),
				   __GFP_NOWARN | GFP_KERNEL);
	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -ENOMEM;
	}
	err = copy_from_user(exec_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * count);
	if (err) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, err);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_GEN(to_i915(dev)) < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list, NULL);
	if (exec2.flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
			exec2_list[i].offset &= PIN_OFFSET_MASK;
			if (__copy_to_user(&user_exec_list[i].offset,
					   &exec2_list[i].offset,
					   sizeof(user_exec_list[i].offset)))
				break;
		}
	}

	kvfree(exec_list);
	kvfree(exec2_list);
	return err;
}

int
i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list;
	struct drm_syncobj **fences = NULL;
	const size_t count = args->buffer_count;
	int err;

	if (!check_buffer_count(count)) {
		DRM_DEBUG("execbuf2 with %zd buffers\n", count);
		return -EINVAL;
	}

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	/* Allocate an extra slot for use by the command parser */
	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %zd buffers\n",
			  count);
		return -ENOMEM;
	}
	if (copy_from_user(exec2_list,
			   u64_to_user_ptr(args->buffers_ptr),
			   sizeof(*exec2_list) * count)) {
		DRM_DEBUG("copy %zd exec entries failed\n", count);
		kvfree(exec2_list);
		kvfree(exec2_list);
		return -EFAULT;
	}

	if (args->flags & I915_EXEC_FENCE_ARRAY) {
		fences = get_fence_array(args, file);
		if (IS_ERR(fences)) {
			kvfree(exec2_list);
			return PTR_ERR(fences);
		}
	}

	err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences);

	/*
	 * Now that we have begun execution of the batchbuffer, we ignore
	 * any new error after this point. Also given that we have already
	 * updated the associated relocations, we try to write out the current
	 * object locations irrespective of any error.
	 */
	if (args->flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);
		unsigned int i;

		/* Copy the new buffer offsets back to the user's exec list. */
		/*
		 * Note: count * sizeof(*user_exec_list) does not overflow,
		 * because we checked 'count' in check_buffer_count().
		 *
		 * And this range already got effectively checked earlier
		 * when we did the "copy_from_user()" above.
		 */
		if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list)))
			goto end_user;

		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
			unsafe_put_user(exec2_list[i].offset,
					&user_exec_list[i].offset,
					end_user);
		}
end_user:
		user_access_end();
	}

	args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
	put_fence_array(args, fences);
	kvfree(exec2_list);
	return err;
}