/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <linux/dma_remapping.h>
#include <linux/reservation.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drmP.h>
#include <drm/drm_syncobj.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

enum {
	FORCE_CPU_RELOC = 1,
	FORCE_GTT_RELOC,
	FORCE_GPU_RELOC,
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
};

#define __EXEC_OBJECT_HAS_REF		BIT(31)
#define __EXEC_OBJECT_HAS_PIN		BIT(30)
#define __EXEC_OBJECT_HAS_FENCE		BIT(29)
#define __EXEC_OBJECT_NEEDS_MAP		BIT(28)
#define __EXEC_OBJECT_NEEDS_BIAS	BIT(27)
#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 27) /* all of the above */
#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)

#define __EXEC_HAS_RELOC	BIT(31)
#define __EXEC_VALIDATED	BIT(30)
#define UPDATE			PIN_OFFSET_FIXED

#define BATCH_OFFSET_BIAS (256*1024)

#define __I915_EXEC_ILLEGAL_FLAGS \
	(__I915_EXEC_UNKNOWN_FLAGS | I915_EXEC_CONSTANTS_MASK)

/**
 * DOC: User command execution
 *
 * Userspace submits commands to be executed on the GPU as an instruction
 * stream within a GEM object we call a batchbuffer. These instructions may
 * refer to other GEM objects containing auxiliary state such as kernels,
 * samplers, render targets and even secondary batchbuffers. Userspace does
 * not know where in the GPU memory these objects reside and so before the
 * batchbuffer is passed to the GPU for execution, those addresses in the
 * batchbuffer and auxiliary objects are updated. This is known as relocation,
 * or patching. To try and avoid having to relocate each object on the next
 * execution, userspace is told the location of those objects in this pass,
 * but this remains just a hint as the kernel may choose a new location for
 * any object in the future.
 *
 * Processing an execbuf ioctl is conceptually split up into a few phases.
 *
 * 1. Validation - Ensure all the pointers, handles and flags are valid.
 * 2. Reservation - Assign GPU address space for every object
 * 3. Relocation - Update any addresses to point to the final locations
 * 4. Serialisation - Order the request with respect to its dependencies
 * 5. Construction - Construct a request to execute the batchbuffer
 * 6. Submission (at some point in the future execution)
 *
 * Reserving resources for the execbuf is the most complicated phase. We
 * neither want to have to migrate the object in the address space, nor do
 * we want to have to update any relocations pointing to this object. Ideally,
 * we want to leave the object where it is and for all the existing relocations
 * to match. If the object is given a new address, or if userspace thinks the
 * object is elsewhere, we have to parse all the relocation entries and update
 * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
 * all the target addresses in all of its objects match the value in the
 * relocation entries and that they all match the presumed offsets given by the
 * list of execbuffer objects. Using this knowledge, we know that if we haven't
 * moved any buffers, all the relocation entries are valid and we can skip
 * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
 * hang.) The requirements for using I915_EXEC_NO_RELOC are:
 *
 *      The addresses written in the objects must match the corresponding
 *      reloc.presumed_offset which in turn must match the corresponding
 *      execobject.offset.
 *
 *      Any render targets written to in the batch must be flagged with
 *      EXEC_OBJECT_WRITE.
 *
 *      To avoid stalling, execobject.offset should match the current
 *      address of that object within the active context.
 *
 * The reservation is done in multiple phases. First we try to keep any
 * object already bound at its current location - so long as it meets the
 * constraints imposed by the new execbuffer. Any object left unbound after the
 * first pass is then fitted into any available idle space. If an object does
 * not fit, all objects are removed from the reservation and the process rerun
 * after sorting the objects into a priority order (more difficult to fit
 * objects are tried first). Failing that, the entire VM is cleared and we try
 * to fit the execbuf once last time before concluding that it simply will not
 * fit.
 *
 * A small complication to all of this is that we allow userspace not only to
 * specify an alignment and a size for the object in the address space, but
 * we also allow userspace to specify the exact offset. These objects are
 * simpler to place (the location is known a priori); all we have to do is make
 * sure the space is available.
 *
 * Once all the objects are in place, patching up the buried pointers to point
 * to the final locations is a fairly simple job of walking over the relocation
 * entry arrays, looking up the right address and rewriting the value into
 * the object. Simple! ... The relocation entries are stored in user memory
 * and so to access them we have to copy them into a local buffer. That copy
 * has to avoid taking any pagefaults as they may lead back to a GEM object
 * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
 * the relocation into multiple passes. First we try to do everything within an
 * atomic context (avoid the pagefaults) which requires that we never wait. If
 * we detect that we may wait, or if we need to fault, then we have to fallback
 * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
 * bells yet?) Dropping the mutex means that we lose all the state we have
 * built up so far for the execbuf and we must reset any global data. However,
 * we do leave the objects pinned in their final locations - which is a
 * potential issue for concurrent execbufs. Once we have left the mutex, we can
 * allocate and copy all the relocation entries into a large array at our
 * leisure, reacquire the mutex, reclaim all the objects and other state and
 * then proceed to update any incorrect addresses with the objects.
 *
 * As we process the relocation entries, we maintain a record of whether the
 * object is being written to. Using NORELOC, we expect userspace to provide
 * this information instead. We also check whether we can skip the relocation
 * by comparing the expected value inside the relocation entry with the target's
 * final address. If they differ, we have to map the current object and rewrite
 * the 4 or 8 byte pointer within.
 *
 * Serialising an execbuf is quite simple according to the rules of the GEM
 * ABI. Execution within each context is ordered by the order of submission.
 * Writes to any GEM object are in order of submission and are exclusive. Reads
 * from a GEM object are unordered with respect to other reads, but ordered by
 * writes. A write submitted after a read cannot occur before the read, and
 * similarly any read submitted after a write cannot occur before the write.
 * Writes are ordered between engines such that only one write occurs at any
 * time (completing any reads beforehand) - using semaphores where available
 * and CPU serialisation otherwise. Other GEM accesses obey the same rules; any
 * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
 * reads before starting, and any read (either using set-domain or pread) must
 * flush all GPU writes before starting. (Note we only employ a barrier before,
 * we currently rely on userspace not concurrently starting a new execution
 * whilst reading or writing to an object. This may be an advantage or not
 * depending on how much you trust userspace not to shoot themselves in the
 * foot.) Serialisation may just result in the request being inserted into
 * a DAG awaiting its turn, but the simplest is to wait on the CPU until
 * all dependencies are resolved.
 *
 * After all of that, it is just a matter of closing the request and handing it to
 * the hardware (well, leaving it in a queue to be executed). However, we also
 * offer the ability for batchbuffers to be run with elevated privileges so
 * that they can access otherwise hidden registers. (Used to adjust L3 cache etc.)
 * Before any batch is given extra privileges we first must check that it
 * contains no nefarious instructions, we check that each instruction is from
 * our whitelist and all registers are also from an allowed list. We first
 * copy the user's batchbuffer to a shadow (so that the user doesn't have
 * access to it, either by the CPU or GPU as we scan it) and then parse each
 * instruction. If everything is ok, we set a flag telling the hardware to run
 * the batchbuffer in trusted mode, otherwise the ioctl is rejected.
 */
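
/*
 * Illustrative userspace-side sketch (not part of the driver): roughly how
 * the flow described above is driven through DRM_IOCTL_I915_GEM_EXECBUFFER2.
 * The handle/size variables below are placeholders and error handling is
 * omitted.
 *
 *	struct drm_i915_gem_exec_object2 obj[2] = {};
 *	struct drm_i915_gem_execbuffer2 execbuf = {};
 *
 *	obj[0].handle = target_handle;		// e.g. a render target
 *	obj[0].flags = EXEC_OBJECT_WRITE;	// written by the batch
 *	obj[1].handle = batch_handle;		// batch is last by default
 *	obj[1].relocation_count = nreloc;
 *	obj[1].relocs_ptr = (uintptr_t)relocs;
 *
 *	execbuf.buffers_ptr = (uintptr_t)obj;
 *	execbuf.buffer_count = 2;
 *	execbuf.batch_len = batch_bytes;
 *	execbuf.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC;
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 */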

struct i915_execbuffer {
	struct drm_i915_private *i915; /** i915 backpointer */
	struct drm_file *file; /** per-file lookup tables and limits */
	struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
	struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
	struct i915_vma **vma;
	unsigned int *flags;

	struct intel_engine_cs *engine; /** engine to queue the request to */
	struct i915_gem_context *ctx; /** context for building the request */
	struct i915_address_space *vm; /** GTT and vma for the request */

	struct drm_i915_gem_request *request; /** our request to build */
	struct i915_vma *batch; /** identity of the batch obj/vma */

	/** actual size of execobj[] as we may extend it for the cmdparser */
	unsigned int buffer_count;

	/** list of vma not yet bound during reservation phase */
	struct list_head unbound;

	/** list of vma that have execobj.relocation_count */
	struct list_head relocs;

	/**
	 * Track the most recently used object for relocations, as we
	 * frequently have to perform multiple relocations within the same
	 * obj/page
	 */
	struct reloc_cache {
		struct drm_mm_node node; /** temporary GTT binding */
		unsigned long vaddr; /** Current kmap address */
		unsigned long page; /** Currently mapped page index */
		unsigned int gen; /** Cached value of INTEL_GEN */
		bool use_64bit_reloc : 1;
		bool has_llc : 1;
		bool has_fence : 1;
		bool needs_unfenced : 1;

		struct drm_i915_gem_request *rq;
		u32 *rq_cmd;
		unsigned int rq_size;
	} reloc_cache;

	u64 invalid_flags; /** Set of execobj.flags that are invalid */
	u32 context_flags; /** Set of execobj.flags to insert from the ctx */

	u32 batch_start_offset; /** Location within object of batch */
	u32 batch_len; /** Length of batch within object */
	u32 batch_flags; /** Flags composed for emit_bb_start() */

	/**
	 * Indicate either the size of the hashtable used to resolve
	 * relocation handles, or if negative that we are using a direct
	 * index into the execobj[].
	 */
	int lut_size;
	struct hlist_head *buckets; /** ht for relocation handles */
};

#define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags])

/*
 * Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
#define GEN8_HIGH_ADDRESS_BIT 47
static inline u64 gen8_canonical_addr(u64 address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline u64 gen8_noncanonical_addr(u64 address)
{
	return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
}
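
/*
 * Worked example (illustrative): with GEN8_HIGH_ADDRESS_BIT == 47,
 *
 *	gen8_canonical_addr(0x0000800000001000ull)    == 0xffff800000001000ull
 *	gen8_noncanonical_addr(0xffff800000001000ull) == 0x0000800000001000ull
 *
 * i.e. bit 47 is replicated into bits [63:48] before handing the address to
 * the hardware, and masked off again for the driver's internal bookkeeping.
 */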

static int eb_create(struct i915_execbuffer *eb)
{
	if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
		unsigned int size = 1 + ilog2(eb->buffer_count);

		/*
		 * Without a 1:1 association between relocation handles and
		 * the execobject[] index, we instead create a hashtable.
		 * We size it dynamically based on available memory, starting
		 * first with a 1:1 associative hash and scaling back until
		 * the allocation succeeds.
		 *
		 * Later on we use a positive lut_size to indicate we are
		 * using this hashtable, and a negative value to indicate a
		 * direct lookup.
		 */
		do {
			unsigned int flags;

			/* While we can still reduce the allocation size, don't
			 * raise a warning and allow the allocation to fail.
			 * On the last pass though, we want to try as hard
			 * as possible to perform the allocation and warn
			 * if it fails.
			 */
			flags = GFP_TEMPORARY;
			if (size > 1)
				flags |= __GFP_NORETRY | __GFP_NOWARN;

			eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
					      flags);
			if (eb->buckets)
				break;
		} while (--size);

		if (unlikely(!size))
			return -ENOMEM;

		eb->lut_size = size;
	} else {
		eb->lut_size = -eb->buffer_count;
	}

	return 0;
}
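
/*
 * Sizing example (illustrative): for buffer_count == 100 without
 * I915_EXEC_HANDLE_LUT, size starts at 1 + ilog2(100) == 7, i.e. 128 hash
 * buckets; under memory pressure the loop retries with 64, 32, ... buckets
 * before giving up. With I915_EXEC_HANDLE_LUT, lut_size is simply -100 and
 * relocation handles index execobj[] directly.
 */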

static bool
eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
		 const struct i915_vma *vma,
		 unsigned int flags)
{
	if (vma->node.size < entry->pad_to_size)
		return true;

	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
		return true;

	if (flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	return false;
}

static inline bool
eb_pin_vma(struct i915_execbuffer *eb,
	   const struct drm_i915_gem_exec_object2 *entry,
	   struct i915_vma *vma)
{
	unsigned int exec_flags = *vma->exec_flags;
	u64 pin_flags;

	if (vma->node.size)
		pin_flags = vma->node.start;
	else
		pin_flags = entry->offset & PIN_OFFSET_MASK;

	pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_GTT))
		pin_flags |= PIN_GLOBAL;

	if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
		return false;

	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
		if (unlikely(i915_vma_get_fence(vma))) {
			i915_vma_unpin(vma);
			return false;
		}

		if (i915_vma_pin_fence(vma))
			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
	return !eb_vma_misplaced(entry, vma, exec_flags);
}

static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
{
	GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));

	if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
		i915_vma_unpin_fence(vma);

	__i915_vma_unpin(vma);
}

static inline void
eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags)
{
	if (!(*flags & __EXEC_OBJECT_HAS_PIN))
		return;

	__eb_unreserve_vma(vma, *flags);
	*flags &= ~__EXEC_OBJECT_RESERVED;
}

397 398 399 400
static int
eb_validate_vma(struct i915_execbuffer *eb,
		struct drm_i915_gem_exec_object2 *entry,
		struct i915_vma *vma)
401
{
402 403
	if (unlikely(entry->flags & eb->invalid_flags))
		return -EINVAL;
404

405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421
	if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
		return -EINVAL;

	/*
	 * Offset can be used as input (EXEC_OBJECT_PINNED), reject
	 * any non-page-aligned or non-canonical addresses.
	 */
	if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
		     entry->offset != gen8_canonical_addr(entry->offset & PAGE_MASK)))
		return -EINVAL;

	/* pad_to_size was once a reserved field, so sanitize it */
	if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
		if (unlikely(offset_in_page(entry->pad_to_size)))
			return -EINVAL;
	} else {
		entry->pad_to_size = 0;
422 423
	}

424
	if (unlikely(vma->exec_flags)) {
425 426 427 428 429 430 431 432 433 434 435 436
		DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n",
			  entry->handle, (int)(entry - eb->exec));
		return -EINVAL;
	}

	/*
	 * From drm_mm perspective address space is continuous,
	 * so from this point we're always using non-canonical
	 * form internally.
	 */
	entry->offset = gen8_noncanonical_addr(entry->offset);

437 438 439 440 441 442 443 444 445 446 447 448
	if (!eb->reloc_cache.has_fence) {
		entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
	} else {
		if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
		     eb->reloc_cache.needs_unfenced) &&
		    i915_gem_object_is_tiled(vma->obj))
			entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
	}

	if (!(entry->flags & EXEC_OBJECT_PINNED))
		entry->flags |= eb->context_flags;

449
	return 0;
450 451
}

452
static int
453
eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
454
{
455
	struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
456 457 458 459 460 461 462 463
	int err;

	GEM_BUG_ON(i915_vma_is_closed(vma));

	if (!(eb->args->flags & __EXEC_VALIDATED)) {
		err = eb_validate_vma(eb, entry, vma);
		if (unlikely(err))
			return err;
464 465
	}

466
	if (eb->lut_size > 0) {
467
		vma->exec_handle = entry->handle;
468
		hlist_add_head(&vma->exec_node,
469 470
			       &eb->buckets[hash_32(entry->handle,
						    eb->lut_size)]);
471
	}
472

473 474 475 476 477 478 479 480 481
	if (entry->relocation_count)
		list_add_tail(&vma->reloc_link, &eb->relocs);

	/*
	 * Stash a pointer from the vma to execobj, so we can query its flags,
	 * size, alignment etc as provided by the user. Also we stash a pointer
	 * to the vma inside the execobj so that we can use a direct lookup
	 * to find the right target VMA when doing relocations.
	 */
482 483 484
	eb->vma[i] = vma;
	eb->flags[i] = entry->flags;
	vma->exec_flags = &eb->flags[i];
485 486

	err = 0;
487
	if (eb_pin_vma(eb, entry, vma)) {
488 489 490 491
		if (entry->offset != vma->node.start) {
			entry->offset = vma->node.start | UPDATE;
			eb->args->flags |= __EXEC_HAS_RELOC;
		}
492 493 494 495 496 497
	} else {
		eb_unreserve_vma(vma, vma->exec_flags);

		list_add_tail(&vma->exec_link, &eb->unbound);
		if (drm_mm_node_allocated(&vma->node))
			err = i915_vma_unbind(vma);
498 499 500 501 502 503 504 505 506 507
	}
	return err;
}

static inline int use_cpu_reloc(const struct reloc_cache *cache,
				const struct drm_i915_gem_object *obj)
{
	if (!i915_gem_object_has_struct_page(obj))
		return false;

508 509 510 511 512
	if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
		return true;

	if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
		return false;
513 514 515 516 517 518 519 520 521

	return (cache->has_llc ||
		obj->cache_dirty ||
		obj->cache_level != I915_CACHE_NONE);
}

static int eb_reserve_vma(const struct i915_execbuffer *eb,
			  struct i915_vma *vma)
{
522 523 524
	struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
	unsigned int exec_flags = *vma->exec_flags;
	u64 pin_flags;
525 526
	int err;

527 528 529
	pin_flags = PIN_USER | PIN_NONBLOCK;
	if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
		pin_flags |= PIN_GLOBAL;
530 531 532 533 534

	/*
	 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
	 * limit address to the first 4GBs for unflagged objects.
	 */
535 536
	if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
		pin_flags |= PIN_ZONE_4G;
537

538 539
	if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
		pin_flags |= PIN_MAPPABLE;
540

541 542 543 544 545
	if (exec_flags & EXEC_OBJECT_PINNED) {
		pin_flags |= entry->offset | PIN_OFFSET_FIXED;
		pin_flags &= ~PIN_NONBLOCK; /* force overlapping checks */
	} else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) {
		pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
546 547
	}

548 549 550
	err = i915_vma_pin(vma,
			   entry->pad_to_size, entry->alignment,
			   pin_flags);
551 552 553 554 555 556 557 558
	if (err)
		return err;

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start | UPDATE;
		eb->args->flags |= __EXEC_HAS_RELOC;
	}

559
	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
560 561 562 563 564 565 566
		err = i915_vma_get_fence(vma);
		if (unlikely(err)) {
			i915_vma_unpin(vma);
			return err;
		}

		if (i915_vma_pin_fence(vma))
567
			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
568 569
	}

570 571
	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
	GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags));
572

573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612
	return 0;
}

static int eb_reserve(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	struct list_head last;
	struct i915_vma *vma;
	unsigned int i, pass;
	int err;

	/*
	 * Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */

	pass = 0;
	err = 0;
	do {
		list_for_each_entry(vma, &eb->unbound, exec_link) {
			err = eb_reserve_vma(eb, vma);
			if (err)
				break;
		}
		if (err != -ENOSPC)
			return err;

		/* Resort *all* the objects into priority order */
		INIT_LIST_HEAD(&eb->unbound);
		INIT_LIST_HEAD(&last);
		for (i = 0; i < count; i++) {
613 614
			unsigned int flags = eb->flags[i];
			struct i915_vma *vma = eb->vma[i];
615

616 617
			if (flags & EXEC_OBJECT_PINNED &&
			    flags & __EXEC_OBJECT_HAS_PIN)
618 619
				continue;

620
			eb_unreserve_vma(vma, &eb->flags[i]);
621

622
			if (flags & EXEC_OBJECT_PINNED)
623
				list_add(&vma->exec_link, &eb->unbound);
624
			else if (flags & __EXEC_OBJECT_NEEDS_MAP)
625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645
				list_add_tail(&vma->exec_link, &eb->unbound);
			else
				list_add_tail(&vma->exec_link, &last);
		}
		list_splice_tail(&last, &eb->unbound);

		switch (pass++) {
		case 0:
			break;

		case 1:
			/* Too fragmented, unbind everything and retry */
			err = i915_gem_evict_vm(eb->vm);
			if (err)
				return err;
			break;

		default:
			return -ENOSPC;
		}
	} while (1);
646
}
647

648
static inline struct hlist_head *
649
ht_head(const struct i915_gem_context_vma_lut *lut, u32 handle)
650
{
651
	return &lut->ht[hash_32(handle, lut->ht_bits)];
652 653 654
}

static inline bool
655
ht_needs_resize(const struct i915_gem_context_vma_lut *lut)
656
{
657 658
	return (4*lut->ht_count > 3*lut->ht_size ||
		4*lut->ht_count + 1 < lut->ht_size);
659 660
}

661 662
static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
{
663 664 665 666
	if (eb->args->flags & I915_EXEC_BATCH_FIRST)
		return 0;
	else
		return eb->buffer_count - 1;
667 668 669 670 671 672 673
}

static int eb_select_context(struct i915_execbuffer *eb)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
674 675
	if (unlikely(!ctx))
		return -ENOENT;
676

677
	eb->ctx = ctx;
678 679 680 681 682 683 684 685 686 687
	eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base;

	eb->context_flags = 0;
	if (ctx->flags & CONTEXT_NO_ZEROMAP)
		eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return 0;
}

static int eb_lookup_vmas(struct i915_execbuffer *eb)
688
{
689
#define INTERMEDIATE BIT(0)
690 691
	const unsigned int count = eb->buffer_count;
	struct i915_gem_context_vma_lut *lut = &eb->ctx->vma_lut;
692
	struct i915_vma *vma;
693 694
	struct idr *idr;
	unsigned int i;
695
	int slow_pass = -1;
696
	int err;
697

698 699 700 701 702 703
	if (unlikely(i915_gem_context_is_closed(eb->ctx)))
		return -ENOENT;

	if (unlikely(i915_gem_context_is_banned(eb->ctx)))
		return -EIO;

704 705
	INIT_LIST_HEAD(&eb->relocs);
	INIT_LIST_HEAD(&eb->unbound);
706

707 708 709
	if (unlikely(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS))
		flush_work(&lut->resize);
	GEM_BUG_ON(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS);
710 711 712

	for (i = 0; i < count; i++) {
		hlist_for_each_entry(vma,
713
				     ht_head(lut, eb->exec[i].handle),
714 715 716 717
				     ctx_node) {
			if (vma->ctx_handle != eb->exec[i].handle)
				continue;

718
			err = eb_add_vma(eb, i, vma);
719 720
			if (unlikely(err))
				return err;
721 722 723 724 725 726 727 728 729
			goto next_vma;
		}

		if (slow_pass < 0)
			slow_pass = i;
next_vma: ;
	}

	if (slow_pass < 0)
730
		goto out;
731

732
	spin_lock(&eb->file->table_lock);
733 734 735 736 737
	/*
	 * Grab a reference to the object and release the lock so we can lookup
	 * or create the VMA without using GFP_ATOMIC
	 */
	idr = &eb->file->object_idr;
738 739
	for (i = slow_pass; i < count; i++) {
		struct drm_i915_gem_object *obj;
740

741
		if (eb->vma[i])
742 743
			continue;

744
		obj = to_intel_bo(idr_find(idr, eb->exec[i].handle));
745
		if (unlikely(!obj)) {
746
			spin_unlock(&eb->file->table_lock);
747 748
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  eb->exec[i].handle, i);
749 750
			err = -ENOENT;
			goto err;
751 752
		}

753 754
		eb->vma[i] = (struct i915_vma *)
			ptr_pack_bits(obj, INTERMEDIATE, 1);
755
	}
756
	spin_unlock(&eb->file->table_lock);
757

758 759
	for (i = slow_pass; i < count; i++) {
		struct drm_i915_gem_object *obj;
760
		unsigned int is_obj;
761

762 763
		obj = (typeof(obj))ptr_unpack_bits(eb->vma[i], &is_obj, 1);
		if (!is_obj)
764
			continue;
765

766 767 768 769 770 771 772 773
		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
774
		vma = i915_vma_instance(obj, eb->vm, NULL);
		if (unlikely(IS_ERR(vma))) {
776
			DRM_DEBUG("Failed to lookup VMA\n");
777 778
			err = PTR_ERR(vma);
			goto err;
779 780
		}

781 782 783 784 785
		/* First come, first served */
		if (!vma->ctx) {
			vma->ctx = eb->ctx;
			vma->ctx_handle = eb->exec[i].handle;
			hlist_add_head(&vma->ctx_node,
786 787 788
				       ht_head(lut, eb->exec[i].handle));
			lut->ht_count++;
			lut->ht_size |= I915_CTX_RESIZE_IN_PROGRESS;
789 790 791 792
			if (i915_vma_is_ggtt(vma)) {
				GEM_BUG_ON(obj->vma_hashed);
				obj->vma_hashed = vma;
			}
793 794

			i915_vma_get(vma);
795
		}
796

797
		err = eb_add_vma(eb, i, vma);
798 799
		if (unlikely(err))
			goto err;
800

801 802 803
		GEM_BUG_ON(vma != eb->vma[i]);
		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);

804 805 806
		/* Only after we validated the user didn't use our bits */
		if (vma->ctx != eb->ctx) {
			i915_vma_get(vma);
807
			*vma->exec_flags |= __EXEC_OBJECT_HAS_REF;
808
		}
809 810
	}

811 812 813 814 815
	if (lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS) {
		if (ht_needs_resize(lut))
			queue_work(system_highpri_wq, &lut->resize);
		else
			lut->ht_size &= ~I915_CTX_RESIZE_IN_PROGRESS;
816 817
	}

818 819 820
out:
	/* take note of the batch buffer before we might reorder the lists */
	i = eb_batch_index(eb);
821 822
	eb->batch = eb->vma[i];
	GEM_BUG_ON(eb->batch->exec_flags != &eb->flags[i]);
823

824
	/*
825 826 827 828 829 830 831
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
832
	 */
833 834
	if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
		eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
835
	if (eb->reloc_cache.has_fence)
836
		eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
837

838 839 840 841 842
	eb->args->flags |= __EXEC_VALIDATED;
	return eb_reserve(eb);

err:
	for (i = slow_pass; i < count; i++) {
843 844
		if (ptr_unmask_bits(eb->vma[i], 1))
			eb->vma[i] = NULL;
845 846 847 848
	}
	lut->ht_size &= ~I915_CTX_RESIZE_IN_PROGRESS;
	return err;
#undef INTERMEDIATE
849 850
}

851
static struct i915_vma *
852
eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
853
{
854 855
	if (eb->lut_size < 0) {
		if (handle >= -eb->lut_size)
856
			return NULL;
857
		return eb->vma[handle];
858 859
	} else {
		struct hlist_head *head;
860
		struct i915_vma *vma;
861

862
		head = &eb->buckets[hash_32(handle, eb->lut_size)];
863
		hlist_for_each_entry(vma, head, exec_node) {
864 865
			if (vma->exec_handle == handle)
				return vma;
866 867 868
		}
		return NULL;
	}
869 870
}

871
static void eb_release_vmas(const struct i915_execbuffer *eb)
872
{
873 874 875 876
	const unsigned int count = eb->buffer_count;
	unsigned int i;

	for (i = 0; i < count; i++) {
877 878
		struct i915_vma *vma = eb->vma[i];
		unsigned int flags = eb->flags[i];
879

880
		if (!vma)
881
			continue;
882

883 884 885
		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
		vma->exec_flags = NULL;
		eb->vma[i] = NULL;
886

887 888
		if (flags & __EXEC_OBJECT_HAS_PIN)
			__eb_unreserve_vma(vma, flags);
889

890
		if (flags & __EXEC_OBJECT_HAS_REF)
891
			i915_vma_put(vma);
892
	}
893 894
}

895
static void eb_reset_vmas(const struct i915_execbuffer *eb)
896
{
897
	eb_release_vmas(eb);
898
	if (eb->lut_size > 0)
899 900
		memset(eb->buckets, 0,
		       sizeof(struct hlist_head) << eb->lut_size);
901 902
}

903
static void eb_destroy(const struct i915_execbuffer *eb)
904
{
905 906
	GEM_BUG_ON(eb->reloc_cache.rq);

907
	if (eb->lut_size > 0)
908
		kfree(eb->buckets);
909 910
}

911
static inline u64
912
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
913
		  const struct i915_vma *target)
914
{
915
	return gen8_canonical_addr((int)reloc->delta + target->node.start);
916 917
}
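
/*
 * Worked example (illustrative): if the target vma ends up bound at
 * 0x100000 and the relocation entry carries delta == 0x40, the value
 * patched into the batch at reloc->offset is
 * gen8_canonical_addr(0x100000 + 0x40) == 0x100040.
 */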

918 919
static void reloc_cache_init(struct reloc_cache *cache,
			     struct drm_i915_private *i915)
920
{
921
	cache->page = -1;
922
	cache->vaddr = 0;
923
	/* Must be a variable in the struct to allow GCC to unroll. */
924
	cache->gen = INTEL_GEN(i915);
925
	cache->has_llc = HAS_LLC(i915);
926
	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
927 928
	cache->has_fence = cache->gen < 4;
	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
929
	cache->node.allocated = false;
930 931
	cache->rq = NULL;
	cache->rq_size = 0;
932
}
933

934 935 936 937 938 939 940 941
static inline void *unmask_page(unsigned long p)
{
	return (void *)(uintptr_t)(p & PAGE_MASK);
}

static inline unsigned int unmask_flags(unsigned long p)
{
	return p & ~PAGE_MASK;
942 943
}

944 945
#define KMAP 0x4 /* after CLFLUSH_FLAGS */

946 947 948 949 950 951 952
static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
{
	struct drm_i915_private *i915 =
		container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
	return &i915->ggtt;
}

953 954 955 956 957 958 959 960 961 962 963
static void reloc_gpu_flush(struct reloc_cache *cache)
{
	GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
	cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(cache->rq->batch->obj);
	i915_gem_chipset_flush(cache->rq->i915);

	__i915_add_request(cache->rq, true);
	cache->rq = NULL;
}

964
static void reloc_cache_reset(struct reloc_cache *cache)
965
{
966
	void *vaddr;
967

968 969 970
	if (cache->rq)
		reloc_gpu_flush(cache);

971 972
	if (!cache->vaddr)
		return;
973

974 975 976 977
	vaddr = unmask_page(cache->vaddr);
	if (cache->vaddr & KMAP) {
		if (cache->vaddr & CLFLUSH_AFTER)
			mb();
978

979 980 981
		kunmap_atomic(vaddr);
		i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
	} else {
982
		wmb();
983
		io_mapping_unmap_atomic((void __iomem *)vaddr);
984
		if (cache->node.allocated) {
985
			struct i915_ggtt *ggtt = cache_to_ggtt(cache);
986 987 988

			ggtt->base.clear_range(&ggtt->base,
					       cache->node.start,
989
					       cache->node.size);
990 991 992
			drm_mm_remove_node(&cache->node);
		} else {
			i915_vma_unpin((struct i915_vma *)cache->node.mm);
993
		}
994
	}
995 996 997

	cache->vaddr = 0;
	cache->page = -1;
998 999 1000 1001
}

static void *reloc_kmap(struct drm_i915_gem_object *obj,
			struct reloc_cache *cache,
1002
			unsigned long page)
1003
{
1004 1005 1006 1007 1008 1009
	void *vaddr;

	if (cache->vaddr) {
		kunmap_atomic(unmask_page(cache->vaddr));
	} else {
		unsigned int flushes;
1010
		int err;
1011

1012 1013 1014
		err = i915_gem_obj_prepare_shmem_write(obj, &flushes);
		if (err)
			return ERR_PTR(err);
1015 1016 1017

		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
1018

1019 1020 1021 1022
		cache->vaddr = flushes | KMAP;
		cache->node.mm = (void *)obj;
		if (flushes)
			mb();
1023 1024
	}

1025 1026
	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
1027
	cache->page = page;
1028

1029
	return vaddr;
1030 1031
}

1032 1033
static void *reloc_iomap(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
1034
			 unsigned long page)
1035
{
1036
	struct i915_ggtt *ggtt = cache_to_ggtt(cache);
1037
	unsigned long offset;
1038
	void *vaddr;
1039

1040
	if (cache->vaddr) {
1041
		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
1042 1043
	} else {
		struct i915_vma *vma;
1044
		int err;
1045

1046
		if (use_cpu_reloc(cache, obj))
1047
			return NULL;
1048

1049 1050 1051
		err = i915_gem_object_set_to_gtt_domain(obj, true);
		if (err)
			return ERR_PTR(err);
1052

1053 1054
		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE | PIN_NONBLOCK);
1055 1056
		if (IS_ERR(vma)) {
			memset(&cache->node, 0, sizeof(cache->node));
1057
			err = drm_mm_insert_node_in_range
1058
				(&ggtt->base.mm, &cache->node,
1059
				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
1060
				 0, ggtt->mappable_end,
1061
				 DRM_MM_INSERT_LOW);
1062
			if (err) /* no inactive aperture space, use cpu reloc */
1063
				return NULL;
1064
		} else {
1065 1066
			err = i915_vma_put_fence(vma);
			if (err) {
1067
				i915_vma_unpin(vma);
1068
				return ERR_PTR(err);
1069
			}
1070

1071 1072
			cache->node.start = vma->node.start;
			cache->node.mm = (void *)vma;
1073
		}
1074
	}
1075

1076 1077
	offset = cache->node.start;
	if (cache->node.allocated) {
1078
		wmb();
1079 1080 1081 1082 1083
		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, page),
				       offset, I915_CACHE_NONE, 0);
	} else {
		offset += page << PAGE_SHIFT;
1084 1085
	}

1086 1087
	vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->mappable,
							 offset);
1088 1089
	cache->page = page;
	cache->vaddr = (unsigned long)vaddr;
1090

1091
	return vaddr;
1092 1093
}

1094 1095
static void *reloc_vaddr(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
1096
			 unsigned long page)
1097
{
1098
	void *vaddr;
1099

1100 1101 1102 1103 1104 1105 1106 1107
	if (cache->page == page) {
		vaddr = unmask_page(cache->vaddr);
	} else {
		vaddr = NULL;
		if ((cache->vaddr & KMAP) == 0)
			vaddr = reloc_iomap(obj, cache, page);
		if (!vaddr)
			vaddr = reloc_kmap(obj, cache, page);
1108 1109
	}

1110
	return vaddr;
1111 1112
}

1113
static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
1114
{
1115 1116 1117 1118 1119
	if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
		if (flushes & CLFLUSH_BEFORE) {
			clflushopt(addr);
			mb();
		}
1120

1121
		*addr = value;
1122

1123 1124
		/*
		 * Writes to the same cacheline are serialised by the CPU
1125 1126 1127 1128 1129 1130 1131 1132 1133
		 * (including clflush). On the write path, we only require
		 * that it hits memory in an orderly fashion and place
		 * mb barriers at the start and end of the relocation phase
		 * to ensure ordering of clflush wrt the system.
		 */
		if (flushes & CLFLUSH_AFTER)
			clflushopt(addr);
	} else
		*addr = value;
1134 1135
}

1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196
static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
			     struct i915_vma *vma,
			     unsigned int len)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	struct drm_i915_gem_object *obj;
	struct drm_i915_gem_request *rq;
	struct i915_vma *batch;
	u32 *cmd;
	int err;

	GEM_BUG_ON(vma->obj->base.write_domain & I915_GEM_DOMAIN_CPU);

	obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map(obj,
				      cache->has_llc ? I915_MAP_WB : I915_MAP_WC);
	i915_gem_object_unpin_pages(obj);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	err = i915_gem_object_set_to_wc_domain(obj, false);
	if (err)
		goto err_unmap;

	batch = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_unmap;
	}

	err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
	if (err)
		goto err_unmap;

	rq = i915_gem_request_alloc(eb->engine, eb->ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = i915_gem_request_await_object(rq, vma->obj, true);
	if (err)
		goto err_request;

	err = eb->engine->emit_flush(rq, EMIT_INVALIDATE);
	if (err)
		goto err_request;

	err = i915_switch_context(rq);
	if (err)
		goto err_request;

	err = eb->engine->emit_bb_start(rq,
					batch->node.start, PAGE_SIZE,
					cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
	if (err)
		goto err_request;

1197
	GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
1198
	i915_vma_move_to_active(batch, rq, 0);
1199 1200 1201
	reservation_object_lock(batch->resv, NULL);
	reservation_object_add_excl_fence(batch->resv, &rq->fence);
	reservation_object_unlock(batch->resv);
1202 1203
	i915_vma_unpin(batch);

1204
	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
1205 1206 1207
	reservation_object_lock(vma->resv, NULL);
	reservation_object_add_excl_fence(vma->resv, &rq->fence);
	reservation_object_unlock(vma->resv);
1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250

	rq->batch = batch;

	cache->rq = rq;
	cache->rq_cmd = cmd;
	cache->rq_size = 0;

	/* Return with batch mapping (cmd) still pinned */
	return 0;

err_request:
	i915_add_request(rq);
err_unpin:
	i915_vma_unpin(batch);
err_unmap:
	i915_gem_object_unpin_map(obj);
	return err;
}

static u32 *reloc_gpu(struct i915_execbuffer *eb,
		      struct i915_vma *vma,
		      unsigned int len)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	u32 *cmd;

	if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1))
		reloc_gpu_flush(cache);

	if (unlikely(!cache->rq)) {
		int err;

		err = __reloc_gpu_alloc(eb, vma, len);
		if (unlikely(err))
			return ERR_PTR(err);
	}

	cmd = cache->rq_cmd + cache->rq_size;
	cache->rq_size += len;

	return cmd;
}

1251 1252
static u64
relocate_entry(struct i915_vma *vma,
1253
	       const struct drm_i915_gem_relocation_entry *reloc,
1254 1255
	       struct i915_execbuffer *eb,
	       const struct i915_vma *target)
1256
{
1257
	u64 offset = reloc->offset;
1258 1259
	u64 target_offset = relocation_target(reloc, target);
	bool wide = eb->reloc_cache.use_64bit_reloc;
1260
	void *vaddr;
1261

1262 1263
	if (!eb->reloc_cache.vaddr &&
	    (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
1264 1265 1266
	     !reservation_object_test_signaled_rcu(vma->resv, true)) &&
	    __intel_engine_can_store_dword(eb->reloc_cache.gen,
					   eb->engine->class)) {
1267 1268 1269 1270 1271 1272 1273 1274 1275
		const unsigned int gen = eb->reloc_cache.gen;
		unsigned int len;
		u32 *batch;
		u64 addr;

		if (wide)
			len = offset & 7 ? 8 : 5;
		else if (gen >= 4)
			len = 4;
1276
		else
1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322
			len = 3;

		batch = reloc_gpu(eb, vma, len);
		if (IS_ERR(batch))
			goto repeat;

		addr = gen8_canonical_addr(vma->node.start + offset);
		if (wide) {
			if (offset & 7) {
				*batch++ = MI_STORE_DWORD_IMM_GEN4;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = lower_32_bits(target_offset);

				addr = gen8_canonical_addr(addr + 4);

				*batch++ = MI_STORE_DWORD_IMM_GEN4;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = upper_32_bits(target_offset);
			} else {
				*batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = lower_32_bits(target_offset);
				*batch++ = upper_32_bits(target_offset);
			}
		} else if (gen >= 6) {
			*batch++ = MI_STORE_DWORD_IMM_GEN4;
			*batch++ = 0;
			*batch++ = addr;
			*batch++ = target_offset;
		} else if (gen >= 4) {
			*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
			*batch++ = 0;
			*batch++ = addr;
			*batch++ = target_offset;
		} else {
			*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*batch++ = addr;
			*batch++ = target_offset;
		}

		goto out;
	}

1323
repeat:
1324
	vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT);
1325 1326 1327 1328 1329
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	clflush_write32(vaddr + offset_in_page(offset),
			lower_32_bits(target_offset),
1330
			eb->reloc_cache.vaddr);
1331 1332 1333 1334 1335 1336

	if (wide) {
		offset += sizeof(u32);
		target_offset >>= 32;
		wide = false;
		goto repeat;
1337 1338
	}

1339
out:
1340
	return target->node.start | UPDATE;
1341 1342
}

1343 1344 1345 1346
static u64
eb_relocate_entry(struct i915_execbuffer *eb,
		  struct i915_vma *vma,
		  const struct drm_i915_gem_relocation_entry *reloc)
1347
{
1348
	struct i915_vma *target;
1349
	int err;
1350

1351
	/* we already hold a reference to all valid objects */
1352 1353
	target = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(!target))
1354
		return -ENOENT;
1355

1356
	/* Validate that the target is in a valid r/w GPU domain */
1357
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
1358
		DRM_DEBUG("reloc with multiple write domains: "
1359
			  "target %d offset %d "
1360
			  "read %08x write %08x",
1361
			  reloc->target_handle,
1362 1363 1364
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
1365
		return -EINVAL;
1366
	}
1367 1368
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
1369
		DRM_DEBUG("reloc with read/write non-GPU domains: "
1370
			  "target %d offset %d "
1371
			  "read %08x write %08x",
1372
			  reloc->target_handle,
1373 1374 1375
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
1376
		return -EINVAL;
1377 1378
	}

1379
	if (reloc->write_domain) {
1380
		*target->exec_flags |= EXEC_OBJECT_WRITE;
1381

1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395
		/*
		 * Sandybridge PPGTT errata: We need a global gtt mapping
		 * for MI and pipe_control writes because the gpu doesn't
		 * properly redirect them through the ppgtt for non_secure
		 * batchbuffers.
		 */
		if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
		    IS_GEN6(eb->i915)) {
			err = i915_vma_bind(target, target->obj->cache_level,
					    PIN_GLOBAL);
			if (WARN_ONCE(err,
				      "Unexpected failure to bind target VMA!"))
				return err;
		}
1396
	}
1397

1398 1399
	/*
	 * If the relocation already has the right value in it, no
1400 1401
	 * more work needs to be done.
	 */
1402 1403
	if (!DBG_FORCE_RELOC &&
	    gen8_canonical_addr(target->node.start) == reloc->presumed_offset)
1404
		return 0;
1405 1406

	/* Check that the relocation address is valid... */
1407
	if (unlikely(reloc->offset >
1408
		     vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
1409
		DRM_DEBUG("Relocation beyond object bounds: "
1410 1411 1412 1413
			  "target %d offset %d size %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset,
			  (int)vma->size);
1414
		return -EINVAL;
1415
	}
1416
	if (unlikely(reloc->offset & 3)) {
1417
		DRM_DEBUG("Relocation not 4-byte aligned: "
1418 1419 1420
			  "target %d offset %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset);
1421
		return -EINVAL;
1422 1423
	}

1424 1425 1426 1427 1428 1429 1430 1431
	/*
	 * If we write into the object, we need to force the synchronisation
	 * barrier, either with an asynchronous clflush or if we executed the
	 * patching using the GPU (though that should be serialised by the
	 * timeline). To be completely sure, and since we are required to
	 * do relocations we are already stalling, disable the user's opt
	 * out of our synchronisation.
	 */
1432
	*vma->exec_flags &= ~EXEC_OBJECT_ASYNC;
1433

1434
	/* and update the user's relocation entry */
1435
	return relocate_entry(vma, reloc, eb, target);
1436 1437
}

1438
static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
1439
{
1440
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
1441 1442
	struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *urelocs;
1443
	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
1444
	unsigned int remain;
1445

1446
	urelocs = u64_to_user_ptr(entry->relocs_ptr);
1447
	remain = entry->relocation_count;
1448 1449
	if (unlikely(remain > N_RELOC(ULONG_MAX)))
		return -EINVAL;
1450

1451 1452 1453 1454 1455
	/*
	 * We must check that the entire relocation array is safe
	 * to read. However, if the array is not writable the user loses
	 * the updated relocation values.
	 */
1456
	if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs))))
1457 1458 1459 1460 1461 1462 1463
		return -EFAULT;

	do {
		struct drm_i915_gem_relocation_entry *r = stack;
		unsigned int count =
			min_t(unsigned int, remain, ARRAY_SIZE(stack));
		unsigned int copied;
1464

1465 1466
		/*
		 * This is the fast path and we cannot handle a pagefault
1467 1468 1469 1470 1471 1472 1473
		 * whilst holding the struct mutex lest the user pass in the
		 * relocations contained within a mmaped bo. In such a case
		 * the page fault handler would call i915_gem_fault() and
		 * we would try to acquire the struct mutex again. Obviously
		 * this is bad and so lockdep complains vehemently.
		 */
		pagefault_disable();
1474
		copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
1475
		pagefault_enable();
1476 1477
		if (unlikely(copied)) {
			remain = -EFAULT;
1478 1479
			goto out;
		}
1480

1481
		remain -= count;
1482
		do {
1483
			u64 offset = eb_relocate_entry(eb, vma, r);
1484

1485 1486 1487
			if (likely(offset == 0)) {
			} else if ((s64)offset < 0) {
				remain = (int)offset;
1488
				goto out;
1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513
			} else {
				/*
				 * Note that reporting an error now
				 * leaves everything in an inconsistent
				 * state as we have *already* changed
				 * the relocation value inside the
				 * object. As we have not changed the
				 * reloc.presumed_offset and will not
				 * change the execobject.offset, on the
				 * next call we may not rewrite the value
				 * inside the object, leaving it
				 * dangling and causing a GPU hang. Unless
				 * userspace dynamically rebuilds the
				 * relocations on each execbuf rather than
				 * presume a static tree.
				 *
				 * We did previously check if the relocations
				 * were writable (access_ok), an error now
				 * would be a strange race with mprotect,
				 * having already demonstrated that we
				 * can read from this userspace address.
				 */
				offset = gen8_canonical_addr(offset & ~UPDATE);
				__put_user(offset,
					   &urelocs[r-stack].presumed_offset);
1514
			}
1515 1516 1517
		} while (r++, --count);
		urelocs += ARRAY_SIZE(stack);
	} while (remain);
1518
out:
1519
	reloc_cache_reset(&eb->reloc_cache);
1520
	return remain;
1521 1522 1523
}

static int
1524
eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma)
1525
{
1526
	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
1527 1528 1529 1530
	struct drm_i915_gem_relocation_entry *relocs =
		u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
	unsigned int i;
	int err;
1531 1532

	for (i = 0; i < entry->relocation_count; i++) {
1533
		u64 offset = eb_relocate_entry(eb, vma, &relocs[i]);
1534

1535 1536 1537 1538
		if ((s64)offset < 0) {
			err = (int)offset;
			goto err;
		}
1539
	}
1540 1541 1542 1543
	err = 0;
err:
	reloc_cache_reset(&eb->reloc_cache);
	return err;
1544 1545
}

1546
static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
1547
{
1548 1549 1550
	const char __user *addr, *end;
	unsigned long size;
	char __maybe_unused c;
1551

1552 1553 1554
	size = entry->relocation_count;
	if (size == 0)
		return 0;
1555

1556 1557
	if (size > N_RELOC(ULONG_MAX))
		return -EINVAL;
1558

1559 1560 1561 1562
	addr = u64_to_user_ptr(entry->relocs_ptr);
	size *= sizeof(struct drm_i915_gem_relocation_entry);
	if (!access_ok(VERIFY_READ, addr, size))
		return -EFAULT;
1563

1564 1565 1566 1567 1568
	end = addr + size;
	for (; addr < end; addr += PAGE_SIZE) {
		int err = __get_user(c, addr);
		if (err)
			return err;
1569
	}
1570
	return __get_user(c, end - 1);
1571
}
1572

1573
static int eb_copy_relocations(const struct i915_execbuffer *eb)
1574
{
1575 1576 1577
	const unsigned int count = eb->buffer_count;
	unsigned int i;
	int err;
1578

1579 1580 1581 1582 1583 1584
	for (i = 0; i < count; i++) {
		const unsigned int nreloc = eb->exec[i].relocation_count;
		struct drm_i915_gem_relocation_entry __user *urelocs;
		struct drm_i915_gem_relocation_entry *relocs;
		unsigned long size;
		unsigned long copied;
1585

1586 1587
		if (nreloc == 0)
			continue;
1588

1589 1590 1591
		err = check_relocations(&eb->exec[i]);
		if (err)
			goto err;
1592

1593 1594
		urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
		size = nreloc * sizeof(*relocs);
1595

1596 1597 1598 1599 1600 1601
		relocs = kvmalloc_array(size, 1, GFP_TEMPORARY);
		if (!relocs) {
			kvfree(relocs);
			err = -ENOMEM;
			goto err;
		}
1602

1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615
		/* copy_from_user is limited to < 4GiB */
		copied = 0;
		do {
			unsigned int len =
				min_t(u64, BIT_ULL(31), size - copied);

			if (__copy_from_user((char *)relocs + copied,
					     (char *)urelocs + copied,
					     len)) {
				kvfree(relocs);
				err = -EFAULT;
				goto err;
			}
1616

1617 1618
			copied += len;
		} while (copied < size);
1619

1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636
		/*
		 * As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		user_access_begin();
		for (copied = 0; copied < nreloc; copied++)
			unsafe_put_user(-1,
					&urelocs[copied].presumed_offset,
					end_user);
end_user:
		user_access_end();
1637

1638 1639
		eb->exec[i].relocs_ptr = (uintptr_t)relocs;
	}
1640

1641
	return 0;
1642

1643 1644 1645 1646 1647 1648 1649 1650
err:
	while (i--) {
		struct drm_i915_gem_relocation_entry *relocs =
			u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
		if (eb->exec[i].relocation_count)
			kvfree(relocs);
	}
	return err;
1651 1652
}

1653
static int eb_prefault_relocations(const struct i915_execbuffer *eb)
1654
{
1655 1656
	const unsigned int count = eb->buffer_count;
	unsigned int i;
1657

1658 1659
	if (unlikely(i915.prefault_disable))
		return 0;
1660

1661 1662
	for (i = 0; i < count; i++) {
		int err;
1663

1664 1665 1666 1667
		err = check_relocations(&eb->exec[i]);
		if (err)
			return err;
	}
1668

1669
	return 0;
1670 1671
}

1672
static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
1673
{
1674
	struct drm_device *dev = &eb->i915->drm;
1675
	bool have_copy = false;
1676
	struct i915_vma *vma;
1677 1678 1679 1680 1681 1682 1683
	int err = 0;

repeat:
	if (signal_pending(current)) {
		err = -ERESTARTSYS;
		goto out;
	}
1684

1685
	/* We may process another execbuffer during the unlock... */
1686
	eb_reset_vmas(eb);
1687 1688
	mutex_unlock(&dev->struct_mutex);

1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709
	/*
	 * We take 3 passes through the slowpath.
	 *
	 * 1 - we try to just prefault all the user relocation entries and
	 * then attempt to reuse the atomic pagefault disabled fast path again.
	 *
	 * 2 - we copy the user entries to a local buffer here outside of the
	 * mutex and allow ourselves to wait upon any rendering before
	 * relocations
	 *
	 * 3 - we already have a local copy of the relocation entries, but
	 * were interrupted (EAGAIN) whilst waiting for the objects, try again.
	 */
	if (!err) {
		err = eb_prefault_relocations(eb);
	} else if (!have_copy) {
		err = eb_copy_relocations(eb);
		have_copy = err == 0;
	} else {
		cond_resched();
		err = 0;
1710
	}
1711 1712 1713
	if (err) {
		mutex_lock(&dev->struct_mutex);
		goto out;
1714 1715
	}

1716 1717 1718
	/* A frequent cause for EAGAIN are currently unavailable client pages */
	flush_workqueue(eb->i915->mm.userptr_wq);

1719 1720
	err = i915_mutex_lock_interruptible(dev);
	if (err) {
1721
		mutex_lock(&dev->struct_mutex);
1722
		goto out;
1723 1724
	}

1725
	/* reacquire the objects */
1726 1727
	err = eb_lookup_vmas(eb);
	if (err)
1728
		goto err;
1729

1730 1731
	GEM_BUG_ON(!eb->batch);

1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743
	list_for_each_entry(vma, &eb->relocs, reloc_link) {
		if (!have_copy) {
			pagefault_disable();
			err = eb_relocate_vma(eb, vma);
			pagefault_enable();
			if (err)
				goto repeat;
		} else {
			err = eb_relocate_vma_slow(eb, vma);
			if (err)
				goto err;
		}
1744 1745
	}

1746 1747
	/*
	 * Leave the user relocations as are, this is the painfully slow path,
1748 1749 1750 1751 1752 1753
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774
	if (err == -EAGAIN)
		goto repeat;

out:
	if (have_copy) {
		const unsigned int count = eb->buffer_count;
		unsigned int i;

		for (i = 0; i < count; i++) {
			const struct drm_i915_gem_exec_object2 *entry =
				&eb->exec[i];
			struct drm_i915_gem_relocation_entry *relocs;

			if (!entry->relocation_count)
				continue;

			relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
			kvfree(relocs);
		}
	}

1775
	return err;
1776 1777
}

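/*
 * Look up all execobjects and apply their relocations, falling back to
 * eb_relocate_slow() on any fault or contention.
 */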
static int eb_relocate(struct i915_execbuffer *eb)
{
	if (eb_lookup_vmas(eb))
		goto slow;

	/* The objects are in their final locations, apply the relocations. */
	if (eb->args->flags & __EXEC_HAS_RELOC) {
		struct i915_vma *vma;

		list_for_each_entry(vma, &eb->relocs, reloc_link) {
			if (eb_relocate_vma(eb, vma))
				goto slow;
		}
	}

	return 0;

slow:
	return eb_relocate_slow(eb);
}

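/*
 * Attach this request's fence to the object's reservation object so that
 * anything that later waits on the object is ordered after this batch.
 */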
static void eb_export_fence(struct i915_vma *vma,
			    struct drm_i915_gem_request *req,
			    unsigned int flags)
{
	struct reservation_object *resv = vma->resv;

	/*
	 * Ignore errors from failing to allocate the new fence, we can't
	 * handle an error right now. Worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
	reservation_object_lock(resv, NULL);
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &req->fence);
	else if (reservation_object_reserve_shared(resv) == 0)
		reservation_object_add_shared_fence(resv, &req->fence);
	reservation_object_unlock(resv);
}

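/*
 * Serialise the request against every object it touches (first pass),
 * then mark each vma as active on this request and export its fence
 * (second pass), flushing CPU and chipset caches as required.
 */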
static int eb_move_to_gpu(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		unsigned int flags = eb->flags[i];
		struct i915_vma *vma = eb->vma[i];
		struct drm_i915_gem_object *obj = vma->obj;

		if (flags & EXEC_OBJECT_CAPTURE) {
			struct i915_gem_capture_list *capture;

			capture = kmalloc(sizeof(*capture), GFP_KERNEL);
			if (unlikely(!capture))
				return -ENOMEM;

			capture->next = eb->request->capture_list;
			capture->vma = eb->vma[i];
			eb->request->capture_list = capture;
		}

		/*
		 * If the GPU is not _reading_ through the CPU cache, we need
		 * to make sure that any writes (both previous GPU writes from
		 * before a change in snooping levels and normal CPU writes)
		 * caught in that cache are flushed to main memory.
		 *
		 * We want to say
		 *   obj->cache_dirty &&
		 *   !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		 * but gcc's optimiser doesn't handle that as well and emits
		 * two jumps instead of one. Maybe one day...
		 */
		if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) {
			if (i915_gem_clflush_object(obj, 0))
				flags &= ~EXEC_OBJECT_ASYNC;
		}

		if (flags & EXEC_OBJECT_ASYNC)
			continue;

		err = i915_gem_request_await_object
			(eb->request, obj, flags & EXEC_OBJECT_WRITE);
		if (err)
			return err;
	}

	for (i = 0; i < count; i++) {
		unsigned int flags = eb->flags[i];
		struct i915_vma *vma = eb->vma[i];

		i915_vma_move_to_active(vma, eb->request, flags);
		eb_export_fence(vma, eb->request, flags);

		__eb_unreserve_vma(vma, flags);
		vma->exec_flags = NULL;

		if (unlikely(flags & __EXEC_OBJECT_HAS_REF))
			i915_vma_put(vma);
	}
	eb->exec = NULL;

	/* Unconditionally flush any chipset caches (for streaming writes). */
	i915_gem_chipset_flush(eb->i915);

	/* Unconditionally invalidate GPU caches and TLBs. */
	return eb->engine->emit_flush(eb->request, EMIT_INVALIDATE);
}

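/*
 * Reject execbuf parameters that no sane userspace should generate:
 * unknown flags, DRI1-era clip rectangles (unless reused as a fence array),
 * leftover DR1/DR4 values and misaligned batch offsets or lengths.
 */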
static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
		return false;

	/* Kernel clipping was a DRI1 misfeature */
	if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) {
		if (exec->num_cliprects || exec->cliprects_ptr)
			return false;
	}

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return false;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return false;

	return true;
}

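/*
 * Track the vma (and its object) as busy on @req so that it is only
 * retired, and its read/write domains updated, once the request completes.
 */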
void i915_vma_move_to_active(struct i915_vma *vma,
			     struct drm_i915_gem_request *req,
			     unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	const unsigned int idx = req->engine->id;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	/*
	 * Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we
	 * add the active reference first and queue for it to be dropped
	 * *last*.
	 */
	if (!i915_vma_is_active(vma))
		obj->active_count++;
	i915_vma_set_active(vma, idx);
	i915_gem_active_set(&vma->last_read[idx], req);
	list_move_tail(&vma->vm_link, &vma->vm->active_list);

	obj->base.write_domain = 0;
	if (flags & EXEC_OBJECT_WRITE) {
		obj->base.write_domain = I915_GEM_DOMAIN_RENDER;

		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
			i915_gem_active_set(&obj->frontbuffer_write, req);

		obj->base.read_domains = 0;
	}
	obj->base.read_domains |= I915_GEM_GPU_DOMAINS;

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		i915_gem_active_set(&vma->last_fence, req);
}

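/*
 * Emit MI_LOAD_REGISTER_IMM commands that zero the four gen7 streamout
 * (SOL) write-offset registers, as requested by I915_EXEC_GEN7_SOL_RESET.
 */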
static int i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
{
	u32 *cs;
	int i;

	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	cs = intel_ring_begin(req, 4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(4);
	for (i = 0; i < 4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
		*cs++ = 0;
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

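/*
 * Run the command parser over the user batch, copying it into a shadow
 * buffer taken from the engine's batch pool. On success the shadow vma is
 * pinned into the GGTT and appended to the execbuf object list.
 */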
static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
{
	struct drm_i915_gem_object *shadow_batch_obj;
	struct i915_vma *vma;
	int err;

	shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
						   PAGE_ALIGN(eb->batch_len));
	if (IS_ERR(shadow_batch_obj))
		return ERR_CAST(shadow_batch_obj);

	err = intel_engine_cmd_parser(eb->engine,
				      eb->batch->obj,
				      shadow_batch_obj,
				      eb->batch_start_offset,
				      eb->batch_len,
				      is_master);
	if (err) {
		if (err == -EACCES) /* unhandled chained batch */
			vma = NULL;
		else
			vma = ERR_PTR(err);
		goto out;
	}

	vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		goto out;

	eb->vma[eb->buffer_count] = i915_vma_get(vma);
	eb->flags[eb->buffer_count] =
		__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
	vma->exec_flags = &eb->flags[eb->buffer_count];
	eb->buffer_count++;

out:
	i915_gem_object_unpin_pages(shadow_batch_obj);
	return vma;
}

static void
add_to_client(struct drm_i915_gem_request *req, struct drm_file *file)
{
	req->file_priv = file->driver_priv;
	list_add_tail(&req->client_link, &req->file_priv->mm.request_list);
}

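/*
 * Final submission step: flush the objects to the GPU domain, switch to
 * the target context and emit the batchbuffer start for the user batch.
 */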
static int eb_submit(struct i915_execbuffer *eb)
{
	int err;

	err = eb_move_to_gpu(eb);
	if (err)
		return err;

	err = i915_switch_context(eb->request);
	if (err)
		return err;

	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
		err = i915_reset_gen7_sol_offsets(eb->request);
		if (err)
			return err;
	}

	err = eb->engine->emit_bb_start(eb->request,
					eb->batch->node.start +
					eb->batch_start_offset,
					eb->batch_len,
					eb->batch_flags);
	if (err)
		return err;

	return 0;
}

/**
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The engine index is returned.
 */
static unsigned int
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
			 struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_engine < 0)
		file_priv->bsd_engine = atomic_fetch_xor(1,
			 &dev_priv->mm.bsd_engine_dispatch_index);

	return file_priv->bsd_engine;
}

#define I915_USER_RINGS (4)

static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
	[I915_EXEC_DEFAULT]	= RCS,
	[I915_EXEC_RENDER]	= RCS,
	[I915_EXEC_BLT]		= BCS,
	[I915_EXEC_BSD]		= VCS,
	[I915_EXEC_VEBOX]	= VECS
};

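/*
 * Map the ring selector in the execbuf flags to a physical engine,
 * spreading I915_EXEC_BSD submissions across both VCS engines where a
 * second BSD ring is present.
 */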
static struct intel_engine_cs *
eb_select_engine(struct drm_i915_private *dev_priv,
		 struct drm_file *file,
		 struct drm_i915_gem_execbuffer2 *args)
{
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
	struct intel_engine_cs *engine;

	if (user_ring_id > I915_USER_RINGS) {
		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
		return NULL;
	}

	if ((user_ring_id != I915_EXEC_BSD) &&
	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
		DRM_DEBUG("execbuf with non bsd ring but with invalid "
			  "bsd dispatch flags: %d\n", (int)(args->flags));
		return NULL;
	}

	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
			bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
			bsd_idx--;
		} else {
			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
				  bsd_idx);
			return NULL;
		}

		engine = dev_priv->engine[_VCS(bsd_idx)];
	} else {
		engine = dev_priv->engine[user_ring_map[user_ring_id]];
	}

	if (!engine) {
		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
		return NULL;
	}

	return engine;
}

static void
__free_fence_array(struct drm_syncobj **fences, unsigned int n)
{
	while (n--)
		drm_syncobj_put(ptr_mask_bits(fences[n], 2));
	kvfree(fences);
}

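/*
 * When I915_EXEC_FENCE_ARRAY is set, cliprects_ptr is reused as an array
 * of drm_i915_gem_exec_fence. Look up each syncobj handle and stash its
 * wait/signal flags in the low bits of the pointer.
 */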
static struct drm_syncobj **
get_fence_array(struct drm_i915_gem_execbuffer2 *args,
		struct drm_file *file)
{
	const unsigned int nfences = args->num_cliprects;
	struct drm_i915_gem_exec_fence __user *user;
	struct drm_syncobj **fences;
	unsigned int n;
	int err;

	if (!(args->flags & I915_EXEC_FENCE_ARRAY))
		return NULL;

	if (nfences > SIZE_MAX / sizeof(*fences))
		return ERR_PTR(-EINVAL);

	user = u64_to_user_ptr(args->cliprects_ptr);
	if (!access_ok(VERIFY_READ, user, nfences * 2 * sizeof(u32)))
		return ERR_PTR(-EFAULT);

	fences = kvmalloc_array(args->num_cliprects, sizeof(*fences),
				__GFP_NOWARN | GFP_TEMPORARY);
	if (!fences)
		return ERR_PTR(-ENOMEM);

	for (n = 0; n < nfences; n++) {
		struct drm_i915_gem_exec_fence fence;
		struct drm_syncobj *syncobj;

		if (__copy_from_user(&fence, user++, sizeof(fence))) {
			err = -EFAULT;
			goto err;
		}

		syncobj = drm_syncobj_find(file, fence.handle);
		if (!syncobj) {
			DRM_DEBUG("Invalid syncobj handle provided\n");
			err = -ENOENT;
			goto err;
		}

		fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
	}

	return fences;

err:
	__free_fence_array(fences, n);
	return ERR_PTR(err);
}

static void
put_fence_array(struct drm_i915_gem_execbuffer2 *args,
		struct drm_syncobj **fences)
{
	if (fences)
		__free_fence_array(fences, args->num_cliprects);
}

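/*
 * Make the request wait upon every syncobj in the array that was marked
 * with I915_EXEC_FENCE_WAIT before the batch is executed.
 */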
static int
await_fence_array(struct i915_execbuffer *eb,
		  struct drm_syncobj **fences)
{
	const unsigned int nfences = eb->args->num_cliprects;
	unsigned int n;
	int err;

	for (n = 0; n < nfences; n++) {
		struct drm_syncobj *syncobj;
		struct dma_fence *fence;
		unsigned int flags;

		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
		if (!(flags & I915_EXEC_FENCE_WAIT))
			continue;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&syncobj->fence);
		rcu_read_unlock();
		if (!fence)
			return -EINVAL;

		err = i915_gem_request_await_dma_fence(eb->request, fence);
		dma_fence_put(fence);
		if (err < 0)
			return err;
	}

	return 0;
}

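/*
 * After submission, replace the fence of every syncobj marked with
 * I915_EXEC_FENCE_SIGNAL so that it signals when this request completes.
 */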
static void
signal_fence_array(struct i915_execbuffer *eb,
		   struct drm_syncobj **fences)
{
	const unsigned int nfences = eb->args->num_cliprects;
	struct dma_fence * const fence = &eb->request->fence;
	unsigned int n;

	for (n = 0; n < nfences; n++) {
		struct drm_syncobj *syncobj;
		unsigned int flags;

		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
		if (!(flags & I915_EXEC_FENCE_SIGNAL))
			continue;

		drm_syncobj_replace_fence(syncobj, fence);
	}
}

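/*
 * Core of the execbuf2 ioctl: validate the arguments, reserve and relocate
 * all objects, then build and submit a request that runs the batch on the
 * selected engine.
 */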
static int
i915_gem_do_execbuffer(struct drm_device *dev,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec,
		       struct drm_syncobj **fences)
{
	struct i915_execbuffer eb;
	struct dma_fence *in_fence = NULL;
	struct sync_file *out_fence = NULL;
	int out_fence_fd = -1;
	int err;

	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);

	eb.i915 = to_i915(dev);
	eb.file = file;
	eb.args = args;
	if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
		args->flags |= __EXEC_HAS_RELOC;

	eb.exec = exec;
	eb.vma = memset(exec + args->buffer_count + 1, 0,
			(args->buffer_count + 1) * sizeof(*eb.vma));
	eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1);

	eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	if (USES_FULL_PPGTT(eb.i915))
		eb.invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
	reloc_cache_init(&eb.reloc_cache, eb.i915);

	eb.buffer_count = args->buffer_count;
	eb.batch_start_offset = args->batch_start_offset;
	eb.batch_len = args->batch_len;

	eb.batch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		eb.batch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		eb.batch_flags |= I915_DISPATCH_PINNED;

	eb.engine = eb_select_engine(eb.i915, file, args);
	if (!eb.engine)
		return -EINVAL;

	if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
		if (!HAS_RESOURCE_STREAMER(eb.i915)) {
			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
			return -EINVAL;
		}
		if (eb.engine->id != RCS) {
			DRM_DEBUG("RS is not available on %s\n",
				 eb.engine->name);
			return -EINVAL;
		}

		eb.batch_flags |= I915_DISPATCH_RS;
	}

	if (args->flags & I915_EXEC_FENCE_IN) {
		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!in_fence)
			return -EINVAL;
	}

	if (args->flags & I915_EXEC_FENCE_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			err = out_fence_fd;
			goto err_in_fence;
		}
	}

	err = eb_create(&eb);
	if (err)
		goto err_out_fence;

	GEM_BUG_ON(!eb.lut_size);

	err = eb_select_context(&eb);
	if (unlikely(err))
		goto err_destroy;

	/*
	 * Take a local wakeref for preparing to dispatch the execbuf as
	 * we expect to access the hardware fairly frequently in the
	 * process. Upon first dispatch, we acquire another prolonged
	 * wakeref that we hold until the GPU has been idle for at least
	 * 100ms.
	 */
	intel_runtime_pm_get(eb.i915);

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto err_rpm;

	err = eb_relocate(&eb);
	if (err) {
		/*
		 * If the user expects the execobject.offset and
		 * reloc.presumed_offset to be an exact match,
		 * as for using NO_RELOC, then we cannot update
		 * the execobject.offset until we have completed
		 * relocation.
		 */
		args->flags &= ~__EXEC_HAS_RELOC;
		goto err_vma;
	}

	if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		err = -EINVAL;
		goto err_vma;
	}
	if (eb.batch_start_offset > eb.batch->size ||
	    eb.batch_len > eb.batch->size - eb.batch_start_offset) {
		DRM_DEBUG("Attempting to use out-of-bounds batch\n");
		err = -EINVAL;
		goto err_vma;
	}

	if (eb.engine->needs_cmd_parser && eb.batch_len) {
		struct i915_vma *vma;

		vma = eb_parse(&eb, drm_is_current_master(file));
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_vma;
		}

		if (vma) {
			/*
			 * Batch parsed and accepted:
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
			 * bit from MI_BATCH_BUFFER_START commands issued in
			 * the dispatch_execbuffer implementations. We
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
			eb.batch_flags |= I915_DISPATCH_SECURE;
			eb.batch_start_offset = 0;
			eb.batch = vma;
		}
	}

	if (eb.batch_len == 0)
		eb.batch_len = eb.batch->size - eb.batch_start_offset;

	/*
	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again.
	 */
	if (eb.batch_flags & I915_DISPATCH_SECURE) {
		struct i915_vma *vma;

		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_vma;
		}

		eb.batch = vma;
	}

	/* All GPU relocation batches must be submitted prior to the user rq */
	GEM_BUG_ON(eb.reloc_cache.rq);

	/* Allocate a request for this batch buffer nice and early. */
	eb.request = i915_gem_request_alloc(eb.engine, eb.ctx);
	if (IS_ERR(eb.request)) {
		err = PTR_ERR(eb.request);
		goto err_batch_unpin;
	}

	if (in_fence) {
		err = i915_gem_request_await_dma_fence(eb.request, in_fence);
		if (err < 0)
			goto err_request;
	}

	if (fences) {
		err = await_fence_array(&eb, fences);
		if (err)
			goto err_request;
	}

	if (out_fence_fd != -1) {
		out_fence = sync_file_create(&eb.request->fence);
		if (!out_fence) {
			err = -ENOMEM;
			goto err_request;
		}
	}

	/*
	 * Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	eb.request->batch = eb.batch;

	trace_i915_gem_request_queue(eb.request, eb.batch_flags);
	err = eb_submit(&eb);
err_request:
	__i915_add_request(eb.request, err == 0);
	add_to_client(eb.request, file);

	if (fences)
		signal_fence_array(&eb, fences);

	if (out_fence) {
		if (err == 0) {
			fd_install(out_fence_fd, out_fence->file);
			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
			args->rsvd2 |= (u64)out_fence_fd << 32;
			out_fence_fd = -1;
		} else {
			fput(out_fence->file);
		}
	}

err_batch_unpin:
	if (eb.batch_flags & I915_DISPATCH_SECURE)
		i915_vma_unpin(eb.batch);
err_vma:
	if (eb.exec)
		eb_release_vmas(&eb);
	mutex_unlock(&dev->struct_mutex);
err_rpm:
	intel_runtime_pm_put(eb.i915);
	i915_gem_context_put(eb.ctx);
err_destroy:
	eb_destroy(&eb);
err_out_fence:
	if (out_fence_fd != -1)
		put_unused_fd(out_fence_fd);
err_in_fence:
	dma_fence_put(in_fence);
	return err;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	const size_t sz = (sizeof(struct drm_i915_gem_exec_object2) +
			   sizeof(struct i915_vma *) +
			   sizeof(unsigned int));
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	unsigned int i;
	int err;

	if (args->buffer_count < 1 || args->buffer_count > SIZE_MAX / sz - 1) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	if (!i915_gem_check_execbuffer(&exec2))
		return -EINVAL;

	/* Copy in the exec list from userland */
	exec_list = kvmalloc_array(args->buffer_count, sizeof(*exec_list),
				   __GFP_NOWARN | GFP_TEMPORARY);
	exec2_list = kvmalloc_array(args->buffer_count + 1, sz,
				    __GFP_NOWARN | GFP_TEMPORARY);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -ENOMEM;
	}
	err = copy_from_user(exec_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (err) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, err);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_GEN(to_i915(dev)) < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list, NULL);
	if (exec2.flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
			exec2_list[i].offset &= PIN_OFFSET_MASK;
			if (__copy_to_user(&user_exec_list[i].offset,
					   &exec2_list[i].offset,
					   sizeof(user_exec_list[i].offset)))
				break;
		}
	}

	kvfree(exec_list);
	kvfree(exec2_list);
	return err;
}

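/*
 * ioctl entry point for DRM_IOCTL_I915_GEM_EXECBUFFER2: copy in the exec
 * object list, run the batch, then write the updated offsets (and any
 * output fence) back to userspace.
 */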
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	const size_t sz = (sizeof(struct drm_i915_gem_exec_object2) +
			   sizeof(struct i915_vma *) +
			   sizeof(unsigned int));
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list;
	struct drm_syncobj **fences = NULL;
	int err;

	if (args->buffer_count < 1 || args->buffer_count > SIZE_MAX / sz - 1) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	/* Allocate an extra slot for use by the command parser */
	exec2_list = kvmalloc_array(args->buffer_count + 1, sz,
				    __GFP_NOWARN | GFP_TEMPORARY);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	if (copy_from_user(exec2_list,
			   u64_to_user_ptr(args->buffers_ptr),
			   sizeof(*exec2_list) * args->buffer_count)) {
		DRM_DEBUG("copy %d exec entries failed\n", args->buffer_count);
		kvfree(exec2_list);
		return -EFAULT;
	}

	if (args->flags & I915_EXEC_FENCE_ARRAY) {
		fences = get_fence_array(args, file);
		if (IS_ERR(fences)) {
			kvfree(exec2_list);
			return PTR_ERR(fences);
		}
	}

	err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences);

	/*
	 * Now that we have begun execution of the batchbuffer, we ignore
	 * any new error after this point. Also given that we have already
	 * updated the associated relocations, we try to write out the current
	 * object locations irrespective of any error.
	 */
	if (args->flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);
		unsigned int i;

		/* Copy the new buffer offsets back to the user's exec list. */
		user_access_begin();
		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
			unsafe_put_user(exec2_list[i].offset,
					&user_exec_list[i].offset,
					end_user);
		}
end_user:
		user_access_end();
	}

	args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
	put_fence_array(args, fences);
	kvfree(exec2_list);
	return err;
}