/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <linux/dma_remapping.h>
#include <linux/reservation.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

enum {
	FORCE_CPU_RELOC = 1,
	FORCE_GTT_RELOC,
	FORCE_GPU_RELOC,
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
};

#define __EXEC_OBJECT_HAS_REF		BIT(31)
#define __EXEC_OBJECT_HAS_PIN		BIT(30)
#define __EXEC_OBJECT_HAS_FENCE		BIT(29)
#define __EXEC_OBJECT_NEEDS_MAP		BIT(28)
#define __EXEC_OBJECT_NEEDS_BIAS	BIT(27)
#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 27) /* all of the above */
#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)

#define __EXEC_HAS_RELOC	BIT(31)
#define __EXEC_VALIDATED	BIT(30)
#define UPDATE			PIN_OFFSET_FIXED

#define BATCH_OFFSET_BIAS (256*1024)

#define __I915_EXEC_ILLEGAL_FLAGS \
	(__I915_EXEC_UNKNOWN_FLAGS | I915_EXEC_CONSTANTS_MASK)

/**
 * DOC: User command execution
 *
 * Userspace submits commands to be executed on the GPU as an instruction
 * stream within a GEM object we call a batchbuffer. These instructions may
 * refer to other GEM objects containing auxiliary state such as kernels,
 * samplers, render targets and even secondary batchbuffers. Userspace does
 * not know where in the GPU memory these objects reside and so before the
 * batchbuffer is passed to the GPU for execution, those addresses in the
 * batchbuffer and auxiliary objects are updated. This is known as relocation,
 * or patching. To try and avoid having to relocate each object on the next
 * execution, userspace is told the location of those objects in this pass,
 * but this remains just a hint as the kernel may choose a new location for
 * any object in the future.
 *
 * Processing an execbuf ioctl is conceptually split up into a few phases.
 *
 * 1. Validation - Ensure all the pointers, handles and flags are valid.
 * 2. Reservation - Assign GPU address space for every object.
 * 3. Relocation - Update any addresses to point to the final locations.
 * 4. Serialisation - Order the request with respect to its dependencies.
 * 5. Construction - Construct a request to execute the batchbuffer.
 * 6. Submission (at some point in the future execution).
 *
 * Reserving resources for the execbuf is the most complicated phase. We
 * neither want to have to migrate the object in the address space, nor do
 * we want to have to update any relocations pointing to this object. Ideally,
 * we want to leave the object where it is and for all the existing relocations
 * to match. If the object is given a new address, or if userspace thinks the
 * object is elsewhere, we have to parse all the relocation entries and update
 * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
 * all the target addresses in all of its objects match the value in the
 * relocation entries and that they all match the presumed offsets given by the
 * list of execbuffer objects. Using this knowledge, we know that if we haven't
 * moved any buffers, all the relocation entries are valid and we can skip
 * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
 * hang.) The requirements for using I915_EXEC_NO_RELOC are:
 *
 *      The addresses written in the objects must match the corresponding
 *      reloc.presumed_offset which in turn must match the corresponding
 *      execobject.offset.
 *
 *      Any render targets written to in the batch must be flagged with
 *      EXEC_OBJECT_WRITE.
 *
 *      To avoid stalling, execobject.offset should match the current
 *      address of that object within the active context.
 *
 * The reservation is done in multiple phases. First we try and keep any
 * object already bound in its current location - so long as it meets the
 * constraints imposed by the new execbuffer. Any object left unbound after the
 * first pass is then fitted into any available idle space. If an object does
 * not fit, all objects are removed from the reservation and the process rerun
 * after sorting the objects into a priority order (more difficult to fit
 * objects are tried first). Failing that, the entire VM is cleared and we try
 * to fit the execbuf one last time before concluding that it simply will not
 * fit.
 *
 * A small complication to all of this is that we allow userspace not only to
 * specify an alignment and a size for the object in the address space, but
 * we also allow userspace to specify the exact offset. These objects are
 * simpler to place (the location is known a priori); all we have to do is make
 * sure the space is available.
 *
 * Once all the objects are in place, patching up the buried pointers to point
 * to the final locations is a fairly simple job of walking over the relocation
 * entry arrays, looking up the right address and rewriting the value into
 * the object. Simple! ... The relocation entries are stored in user memory
 * and so to access them we have to copy them into a local buffer. That copy
 * has to avoid taking any pagefaults as they may lead back to a GEM object
 * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
 * the relocation into multiple passes. First we try to do everything within an
 * atomic context (avoid the pagefaults) which requires that we never wait. If
 * we detect that we may wait, or if we need to fault, then we have to fall back
 * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
 * bells yet?) Dropping the mutex means that we lose all the state we have
 * built up so far for the execbuf and we must reset any global data. However,
 * we do leave the objects pinned in their final locations - which is a
 * potential issue for concurrent execbufs. Once we have left the mutex, we can
 * allocate and copy all the relocation entries into a large array at our
 * leisure, reacquire the mutex, reclaim all the objects and other state and
 * then proceed to update any incorrect addresses with the objects.
 *
 * As we process the relocation entries, we maintain a record of whether the
 * object is being written to. Using NORELOC, we expect userspace to provide
 * this information instead. We also check whether we can skip the relocation
 * by comparing the expected value inside the relocation entry with the target's
 * final address. If they differ, we have to map the current object and rewrite
 * the 4 or 8 byte pointer within.
 *
 * Serialising an execbuf is quite simple according to the rules of the GEM
 * ABI. Execution within each context is ordered by the order of submission.
 * Writes to any GEM object are in order of submission and are exclusive. Reads
 * from a GEM object are unordered with respect to other reads, but ordered by
 * writes. A write submitted after a read cannot occur before the read, and
 * similarly any read submitted after a write cannot occur before the write.
 * Writes are ordered between engines such that only one write occurs at any
 * time (completing any reads beforehand) - using semaphores where available
 * and CPU serialisation otherwise. Other GEM accesses obey the same rules: any
 * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
 * reads before starting, and any read (either using set-domain or pread) must
 * flush all GPU writes before starting. (Note we only employ a barrier before,
 * we currently rely on userspace not concurrently starting a new execution
 * whilst reading or writing to an object. This may be an advantage or not
 * depending on how much you trust userspace not to shoot themselves in the
 * foot.) Serialisation may just result in the request being inserted into
 * a DAG awaiting its turn, but the simplest option is to wait on the CPU until
 * all dependencies are resolved.
 *
 * After all of that, it is just a matter of closing the request and handing it to
 * the hardware (well, leaving it in a queue to be executed). However, we also
 * offer the ability for batchbuffers to be run with elevated privileges so
 * that they can access otherwise hidden registers. (Used to adjust L3 cache etc.)
 * Before any batch is given extra privileges we first must check that it
 * contains no nefarious instructions: we check that each instruction is from
 * our whitelist and that all registers are also from an allowed list. We first
 * copy the user's batchbuffer to a shadow (so that the user doesn't have
 * access to it, either by the CPU or GPU as we scan it) and then parse each
 * instruction. If everything is ok, we set a flag telling the hardware to run
 * the batchbuffer in trusted mode, otherwise the ioctl is rejected.
 */
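
/*
 * For illustration only (not part of the driver): a minimal userspace
 * submission relying on I915_EXEC_NO_RELOC might look like the sketch
 * below. The handles and presumed offsets are hypothetical and error
 * handling is elided.
 *
 *	struct drm_i915_gem_exec_object2 obj[2] = {};
 *	struct drm_i915_gem_execbuffer2 execbuf = {};
 *
 *	obj[0].handle = target_handle;   // auxiliary state object
 *	obj[0].offset = presumed_target; // address seen after the last run
 *	obj[1].handle = batch_handle;    // batch is last (no BATCH_FIRST)
 *	obj[1].offset = presumed_batch;
 *
 *	execbuf.buffers_ptr = (uintptr_t)obj;
 *	execbuf.buffer_count = 2;
 *	execbuf.batch_len = batch_len;
 *	execbuf.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC |
 *			I915_EXEC_HANDLE_LUT;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *
 * If every execobject.offset still matches the addresses baked into the
 * buffers, the kernel can skip relocation processing entirely; otherwise
 * it falls back to patching as described above.
 */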

struct i915_execbuffer {
	struct drm_i915_private *i915; /** i915 backpointer */
	struct drm_file *file; /** per-file lookup tables and limits */
	struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
	struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */

	struct intel_engine_cs *engine; /** engine to queue the request to */
	struct i915_gem_context *ctx; /** context for building the request */
	struct i915_address_space *vm; /** GTT and vma for the request */

	struct drm_i915_gem_request *request; /** our request to build */
	struct i915_vma *batch; /** identity of the batch obj/vma */

	/** actual size of execobj[] as we may extend it for the cmdparser */
	unsigned int buffer_count;

	/** list of vma not yet bound during reservation phase */
	struct list_head unbound;

	/** list of vma that have execobj.relocation_count */
	struct list_head relocs;

	/**
	 * Track the most recently used object for relocations, as we
	 * frequently have to perform multiple relocations within the same
	 * obj/page
	 */
	struct reloc_cache {
		struct drm_mm_node node; /** temporary GTT binding */
		unsigned long vaddr; /** Current kmap address */
		unsigned long page; /** Currently mapped page index */
		unsigned int gen; /** Cached value of INTEL_GEN */
		bool use_64bit_reloc : 1;
		bool has_llc : 1;
		bool has_fence : 1;
		bool needs_unfenced : 1;

		struct drm_i915_gem_request *rq;
		u32 *rq_cmd;
		unsigned int rq_size;
	} reloc_cache;

	u64 invalid_flags; /** Set of execobj.flags that are invalid */
	u32 context_flags; /** Set of execobj.flags to insert from the ctx */

	u32 batch_start_offset; /** Location within object of batch */
	u32 batch_len; /** Length of batch within object */
	u32 batch_flags; /** Flags composed for emit_bb_start() */

	/**
	 * Indicate either the size of the hashtable used to resolve
	 * relocation handles, or if negative that we are using a direct
	 * index into the execobj[].
	 */
	int lut_size;
	struct hlist_head *buckets; /** ht for relocation handles */
};

/*
 * As an alternative to creating a hashtable of handle-to-vma for a batch,
 * we use the last available reserved field in the execobject[] and stash
 * a link from the execobj to its vma.
 */
#define __exec_to_vma(ee) (ee)->rsvd2
#define exec_to_vma(ee) u64_to_ptr(struct i915_vma, __exec_to_vma(ee))

/*
 * Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
#define GEN8_HIGH_ADDRESS_BIT 47
static inline u64 gen8_canonical_addr(u64 address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline u64 gen8_noncanonical_addr(u64 address)
{
	return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
}
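
/*
 * Worked example: with bit 47 set, 0x0000800000000000 sign-extends to the
 * canonical 0xffff800000000000, and gen8_noncanonical_addr() masks it back
 * to 0x0000800000000000. Addresses with bit 47 clear pass through both
 * helpers unchanged.
 */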

static int eb_create(struct i915_execbuffer *eb)
{
	if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
		unsigned int size = 1 + ilog2(eb->buffer_count);

		/*
		 * Without a 1:1 association between relocation handles and
		 * the execobject[] index, we instead create a hashtable.
		 * We size it dynamically based on available memory, starting
		 * first with a 1:1 associative hash and scaling back until
		 * the allocation succeeds.
		 *
		 * Later on we use a positive lut_size to indicate we are
		 * using this hashtable, and a negative value to indicate a
		 * direct lookup.
		 */
		do {
			eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
					      GFP_TEMPORARY |
					      __GFP_NORETRY |
					      __GFP_NOWARN);
			if (eb->buckets)
				break;
		} while (--size);

		if (unlikely(!eb->buckets)) {
			eb->buckets = kzalloc(sizeof(struct hlist_head),
					      GFP_TEMPORARY);
			if (unlikely(!eb->buckets))
				return -ENOMEM;
		}

		eb->lut_size = size;
	} else {
		eb->lut_size = -eb->buffer_count;
	}

	return 0;
}
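
/*
 * Sizing example (for illustration): with buffer_count == 1000,
 * ilog2(1000) == 9 so the first attempt allocates 1 << 10 == 1024
 * buckets (8KiB of hlist_heads on 64-bit), halving the table on each
 * allocation failure down to a single bucket in the worst case.
 */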

static bool
eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
		 const struct i915_vma *vma)
{
	if (!(entry->flags & __EXEC_OBJECT_HAS_PIN))
		return true;

	if (vma->node.size < entry->pad_to_size)
		return true;

	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
		return true;

	if (entry->flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	if (!(entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	return false;
}

static inline void
eb_pin_vma(struct i915_execbuffer *eb,
	   struct drm_i915_gem_exec_object2 *entry,
	   struct i915_vma *vma)
{
	u64 flags;

	if (vma->node.size)
		flags = vma->node.start;
	else
		flags = entry->offset & PIN_OFFSET_MASK;

	flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
	if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_GTT))
		flags |= PIN_GLOBAL;

	if (unlikely(i915_vma_pin(vma, 0, 0, flags)))
		return;

	if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_FENCE)) {
		if (unlikely(i915_vma_get_fence(vma))) {
			i915_vma_unpin(vma);
			return;
		}

		if (i915_vma_pin_fence(vma))
			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	entry->flags |= __EXEC_OBJECT_HAS_PIN;
}

static inline void
__eb_unreserve_vma(struct i915_vma *vma,
		   const struct drm_i915_gem_exec_object2 *entry)
{
	GEM_BUG_ON(!(entry->flags & __EXEC_OBJECT_HAS_PIN));

	if (unlikely(entry->flags & __EXEC_OBJECT_HAS_FENCE))
		i915_vma_unpin_fence(vma);

	__i915_vma_unpin(vma);
}

static inline void
eb_unreserve_vma(struct i915_vma *vma,
		 struct drm_i915_gem_exec_object2 *entry)
{
	if (!(entry->flags & __EXEC_OBJECT_HAS_PIN))
		return;

	__eb_unreserve_vma(vma, entry);
	entry->flags &= ~__EXEC_OBJECT_RESERVED;
}

static int
eb_validate_vma(struct i915_execbuffer *eb,
		struct drm_i915_gem_exec_object2 *entry,
		struct i915_vma *vma)
{
	if (unlikely(entry->flags & eb->invalid_flags))
		return -EINVAL;

	if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
		return -EINVAL;

	/*
	 * Offset can be used as input (EXEC_OBJECT_PINNED), reject
	 * any non-page-aligned or non-canonical addresses.
	 */
	if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
		     entry->offset != gen8_canonical_addr(entry->offset & PAGE_MASK)))
		return -EINVAL;

	/* pad_to_size was once a reserved field, so sanitize it */
	if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
		if (unlikely(offset_in_page(entry->pad_to_size)))
			return -EINVAL;
	} else {
		entry->pad_to_size = 0;
	}

	if (unlikely(vma->exec_entry)) {
		DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n",
			  entry->handle, (int)(entry - eb->exec));
		return -EINVAL;
	}

	/*
	 * From drm_mm perspective address space is continuous,
	 * so from this point we're always using non-canonical
	 * form internally.
	 */
	entry->offset = gen8_noncanonical_addr(entry->offset);

	return 0;
}

static int
eb_add_vma(struct i915_execbuffer *eb,
	   struct drm_i915_gem_exec_object2 *entry,
	   struct i915_vma *vma)
{
	int err;

	GEM_BUG_ON(i915_vma_is_closed(vma));

	if (!(eb->args->flags & __EXEC_VALIDATED)) {
		err = eb_validate_vma(eb, entry, vma);
		if (unlikely(err))
			return err;
	}

	if (eb->lut_size >= 0) {
		vma->exec_handle = entry->handle;
		hlist_add_head(&vma->exec_node,
			       &eb->buckets[hash_32(entry->handle,
						    eb->lut_size)]);
	}

	if (entry->relocation_count)
		list_add_tail(&vma->reloc_link, &eb->relocs);

	if (!eb->reloc_cache.has_fence) {
		entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
	} else {
		if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
		     eb->reloc_cache.needs_unfenced) &&
		    i915_gem_object_is_tiled(vma->obj))
			entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
	}

	if (!(entry->flags & EXEC_OBJECT_PINNED))
		entry->flags |= eb->context_flags;

	/*
	 * Stash a pointer from the vma to execobj, so we can query its flags,
	 * size, alignment etc as provided by the user. Also we stash a pointer
	 * to the vma inside the execobj so that we can use a direct lookup
	 * to find the right target VMA when doing relocations.
	 */
	vma->exec_entry = entry;
	__exec_to_vma(entry) = (uintptr_t)vma;

	err = 0;
	eb_pin_vma(eb, entry, vma);
	if (eb_vma_misplaced(entry, vma)) {
		eb_unreserve_vma(vma, entry);

		list_add_tail(&vma->exec_link, &eb->unbound);
		if (drm_mm_node_allocated(&vma->node))
			err = i915_vma_unbind(vma);
	} else {
		if (entry->offset != vma->node.start) {
			entry->offset = vma->node.start | UPDATE;
			eb->args->flags |= __EXEC_HAS_RELOC;
		}
	}
	return err;
}

static inline int use_cpu_reloc(const struct reloc_cache *cache,
				const struct drm_i915_gem_object *obj)
{
	if (!i915_gem_object_has_struct_page(obj))
		return false;

	if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
		return true;

	if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
		return false;

	return (cache->has_llc ||
		obj->cache_dirty ||
		obj->cache_level != I915_CACHE_NONE);
}

static int eb_reserve_vma(const struct i915_execbuffer *eb,
			  struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	u64 flags;
	int err;

	flags = PIN_USER | PIN_NONBLOCK;
	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
		flags |= PIN_GLOBAL;

	/*
	 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
	 * limit address to the first 4GBs for unflagged objects.
	 */
	if (!(entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
		flags |= PIN_ZONE_4G;

	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
		flags |= PIN_MAPPABLE;

	if (entry->flags & EXEC_OBJECT_PINNED) {
		flags |= entry->offset | PIN_OFFSET_FIXED;
		flags &= ~PIN_NONBLOCK; /* force overlapping PINNED checks */
	} else if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS) {
		flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
	}

	err = i915_vma_pin(vma, entry->pad_to_size, entry->alignment, flags);
	if (err)
		return err;

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start | UPDATE;
		eb->args->flags |= __EXEC_HAS_RELOC;
	}

	entry->flags |= __EXEC_OBJECT_HAS_PIN;
	GEM_BUG_ON(eb_vma_misplaced(entry, vma));

	if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_FENCE)) {
		err = i915_vma_get_fence(vma);
		if (unlikely(err)) {
			i915_vma_unpin(vma);
			return err;
		}

		if (i915_vma_pin_fence(vma))
			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	return 0;
}

static int eb_reserve(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	struct list_head last;
	struct i915_vma *vma;
	unsigned int i, pass;
	int err;

	/*
	 * Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */

	pass = 0;
	err = 0;
	do {
		list_for_each_entry(vma, &eb->unbound, exec_link) {
			err = eb_reserve_vma(eb, vma);
			if (err)
				break;
		}
		if (err != -ENOSPC)
			return err;

		/* Resort *all* the objects into priority order */
		INIT_LIST_HEAD(&eb->unbound);
		INIT_LIST_HEAD(&last);
		for (i = 0; i < count; i++) {
			struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];

			if (entry->flags & EXEC_OBJECT_PINNED &&
			    entry->flags & __EXEC_OBJECT_HAS_PIN)
				continue;

			vma = exec_to_vma(entry);
			eb_unreserve_vma(vma, entry);

			if (entry->flags & EXEC_OBJECT_PINNED)
				list_add(&vma->exec_link, &eb->unbound);
			else if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
				list_add_tail(&vma->exec_link, &eb->unbound);
			else
				list_add_tail(&vma->exec_link, &last);
		}
		list_splice_tail(&last, &eb->unbound);

		switch (pass++) {
		case 0:
			break;

		case 1:
			/* Too fragmented, unbind everything and retry */
			err = i915_gem_evict_vm(eb->vm);
			if (err)
				return err;
			break;

		default:
			return -ENOSPC;
		}
	} while (1);
}

static inline struct hlist_head *
ht_head(const struct i915_gem_context_vma_lut *lut, u32 handle)
{
	return &lut->ht[hash_32(handle, lut->ht_bits)];
}

static inline bool
ht_needs_resize(const struct i915_gem_context_vma_lut *lut)
{
	return (4*lut->ht_count > 3*lut->ht_size ||
		4*lut->ht_count + 1 < lut->ht_size);
}
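
/*
 * For illustration: 4*ht_count > 3*ht_size grows the table once the
 * load factor exceeds 0.75, while 4*ht_count + 1 < ht_size shrinks it
 * once the load factor drops below 0.25, leaving a stable band between
 * the two thresholds.
 */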

static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
{
	if (eb->args->flags & I915_EXEC_BATCH_FIRST)
		return 0;
	else
		return eb->buffer_count - 1;
}

static int eb_select_context(struct i915_execbuffer *eb)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
	if (unlikely(IS_ERR(ctx)))
		return PTR_ERR(ctx);

	if (unlikely(i915_gem_context_is_banned(ctx))) {
		DRM_DEBUG("Context %u tried to submit while banned\n",
			  ctx->user_handle);
		return -EIO;
	}

	eb->ctx = i915_gem_context_get(ctx);
	eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base;

	eb->context_flags = 0;
	if (ctx->flags & CONTEXT_NO_ZEROMAP)
		eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return 0;
}

static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
#define INTERMEDIATE BIT(0)
	const unsigned int count = eb->buffer_count;
	struct i915_gem_context_vma_lut *lut = &eb->ctx->vma_lut;
	struct i915_vma *vma;
	struct idr *idr;
	unsigned int i;
	int slow_pass = -1;
	int err;

	INIT_LIST_HEAD(&eb->relocs);
	INIT_LIST_HEAD(&eb->unbound);

	if (unlikely(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS))
		flush_work(&lut->resize);
	GEM_BUG_ON(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS);

	for (i = 0; i < count; i++) {
		__exec_to_vma(&eb->exec[i]) = 0;

		hlist_for_each_entry(vma,
				     ht_head(lut, eb->exec[i].handle),
				     ctx_node) {
			if (vma->ctx_handle != eb->exec[i].handle)
				continue;

			err = eb_add_vma(eb, &eb->exec[i], vma);
			if (unlikely(err))
				return err;

			goto next_vma;
		}

		if (slow_pass < 0)
			slow_pass = i;
next_vma: ;
	}

	if (slow_pass < 0)
		goto out;

	spin_lock(&eb->file->table_lock);
	/*
	 * Grab a reference to the object and release the lock so we can lookup
	 * or create the VMA without using GFP_ATOMIC
	 */
	idr = &eb->file->object_idr;
	for (i = slow_pass; i < count; i++) {
		struct drm_i915_gem_object *obj;

		if (__exec_to_vma(&eb->exec[i]))
			continue;

		obj = to_intel_bo(idr_find(idr, eb->exec[i].handle));
		if (unlikely(!obj)) {
			spin_unlock(&eb->file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  eb->exec[i].handle, i);
			err = -ENOENT;
			goto err;
		}

		__exec_to_vma(&eb->exec[i]) = INTERMEDIATE | (uintptr_t)obj;
	}
	spin_unlock(&eb->file->table_lock);

	for (i = slow_pass; i < count; i++) {
		struct drm_i915_gem_object *obj;

		if (!(__exec_to_vma(&eb->exec[i]) & INTERMEDIATE))
			continue;

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		obj = u64_to_ptr(typeof(*obj),
				 __exec_to_vma(&eb->exec[i]) & ~INTERMEDIATE);
		vma = i915_vma_instance(obj, eb->vm, NULL);
		if (unlikely(IS_ERR(vma))) {
			DRM_DEBUG("Failed to lookup VMA\n");
			err = PTR_ERR(vma);
			goto err;
		}

		/* First come, first served */
		if (!vma->ctx) {
			vma->ctx = eb->ctx;
			vma->ctx_handle = eb->exec[i].handle;
			hlist_add_head(&vma->ctx_node,
				       ht_head(lut, eb->exec[i].handle));
			lut->ht_count++;
			lut->ht_size |= I915_CTX_RESIZE_IN_PROGRESS;
			if (i915_vma_is_ggtt(vma)) {
				GEM_BUG_ON(obj->vma_hashed);
				obj->vma_hashed = vma;
			}

			i915_vma_get(vma);
		}

		err = eb_add_vma(eb, &eb->exec[i], vma);
		if (unlikely(err))
			goto err;

		/* Only after we validated the user didn't use our bits */
		if (vma->ctx != eb->ctx) {
			i915_vma_get(vma);
			eb->exec[i].flags |= __EXEC_OBJECT_HAS_REF;
		}
	}

	if (lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS) {
		if (ht_needs_resize(lut))
			queue_work(system_highpri_wq, &lut->resize);
		else
			lut->ht_size &= ~I915_CTX_RESIZE_IN_PROGRESS;
	}

out:
	/* take note of the batch buffer before we might reorder the lists */
	i = eb_batch_index(eb);
	eb->batch = exec_to_vma(&eb->exec[i]);

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if (!(eb->exec[i].flags & EXEC_OBJECT_PINNED))
		eb->exec[i].flags |= __EXEC_OBJECT_NEEDS_BIAS;
	if (eb->reloc_cache.has_fence)
		eb->exec[i].flags |= EXEC_OBJECT_NEEDS_FENCE;

	eb->args->flags |= __EXEC_VALIDATED;
	return eb_reserve(eb);

err:
	for (i = slow_pass; i < count; i++) {
		if (__exec_to_vma(&eb->exec[i]) & INTERMEDIATE)
			__exec_to_vma(&eb->exec[i]) = 0;
	}
	lut->ht_size &= ~I915_CTX_RESIZE_IN_PROGRESS;
	return err;
#undef INTERMEDIATE
}

static struct i915_vma *
eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
{
	if (eb->lut_size < 0) {
		if (handle >= -eb->lut_size)
			return NULL;
		return exec_to_vma(&eb->exec[handle]);
	} else {
		struct hlist_head *head;
		struct i915_vma *vma;

		head = &eb->buckets[hash_32(handle, eb->lut_size)];
		hlist_for_each_entry(vma, head, exec_node) {
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void eb_release_vmas(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;

	for (i = 0; i < count; i++) {
		struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
		struct i915_vma *vma = exec_to_vma(entry);

		if (!vma)
			continue;

		GEM_BUG_ON(vma->exec_entry != entry);
		vma->exec_entry = NULL;

		if (entry->flags & __EXEC_OBJECT_HAS_PIN)
			__eb_unreserve_vma(vma, entry);

		if (entry->flags & __EXEC_OBJECT_HAS_REF)
			i915_vma_put(vma);

		entry->flags &=
			~(__EXEC_OBJECT_RESERVED | __EXEC_OBJECT_HAS_REF);
	}
}

static void eb_reset_vmas(const struct i915_execbuffer *eb)
{
	eb_release_vmas(eb);
	if (eb->lut_size >= 0)
		memset(eb->buckets, 0,
		       sizeof(struct hlist_head) << eb->lut_size);
}

static void eb_destroy(const struct i915_execbuffer *eb)
{
	GEM_BUG_ON(eb->reloc_cache.rq);

	if (eb->lut_size >= 0)
		kfree(eb->buckets);
}

static inline u64
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
		  const struct i915_vma *target)
{
	return gen8_canonical_addr((int)reloc->delta + target->node.start);
}
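
/*
 * Note the (int) cast above: reloc->delta is a u32 but may encode a
 * negative offset (see the SNA note in eb_lookup_vmas()). For example,
 * a delta of 0xfffffffc (-4) against a target at 0x10000 yields 0xfffc
 * rather than an address far beyond the end of the GTT.
 */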

static void reloc_cache_init(struct reloc_cache *cache,
			     struct drm_i915_private *i915)
{
	cache->page = -1;
	cache->vaddr = 0;
	/* Must be a variable in the struct to allow GCC to unroll. */
	cache->gen = INTEL_GEN(i915);
	cache->has_llc = HAS_LLC(i915);
	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
	cache->has_fence = cache->gen < 4;
	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
	cache->node.allocated = false;
	cache->rq = NULL;
	cache->rq_size = 0;
}

static inline void *unmask_page(unsigned long p)
{
	return (void *)(uintptr_t)(p & PAGE_MASK);
}

static inline unsigned int unmask_flags(unsigned long p)
{
	return p & ~PAGE_MASK;
}

#define KMAP 0x4 /* after CLFLUSH_FLAGS */

static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
{
	struct drm_i915_private *i915 =
		container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
	return &i915->ggtt;
}

static void reloc_gpu_flush(struct reloc_cache *cache)
{
	GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
	cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(cache->rq->batch->obj);
	i915_gem_chipset_flush(cache->rq->i915);

	__i915_add_request(cache->rq, true);
	cache->rq = NULL;
}

static void reloc_cache_reset(struct reloc_cache *cache)
{
	void *vaddr;

	if (cache->rq)
		reloc_gpu_flush(cache);

	if (!cache->vaddr)
		return;

	vaddr = unmask_page(cache->vaddr);
	if (cache->vaddr & KMAP) {
		if (cache->vaddr & CLFLUSH_AFTER)
			mb();

		kunmap_atomic(vaddr);
		i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
	} else {
		wmb();
		io_mapping_unmap_atomic((void __iomem *)vaddr);
		if (cache->node.allocated) {
			struct i915_ggtt *ggtt = cache_to_ggtt(cache);

			ggtt->base.clear_range(&ggtt->base,
					       cache->node.start,
					       cache->node.size);
			drm_mm_remove_node(&cache->node);
		} else {
			i915_vma_unpin((struct i915_vma *)cache->node.mm);
		}
	}

	cache->vaddr = 0;
	cache->page = -1;
}

static void *reloc_kmap(struct drm_i915_gem_object *obj,
			struct reloc_cache *cache,
			unsigned long page)
{
	void *vaddr;

	if (cache->vaddr) {
		kunmap_atomic(unmask_page(cache->vaddr));
	} else {
		unsigned int flushes;
		int err;

		err = i915_gem_obj_prepare_shmem_write(obj, &flushes);
		if (err)
			return ERR_PTR(err);

		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);

		cache->vaddr = flushes | KMAP;
		cache->node.mm = (void *)obj;
		if (flushes)
			mb();
	}

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
	cache->page = page;

	return vaddr;
}

static void *reloc_iomap(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 unsigned long page)
{
	struct i915_ggtt *ggtt = cache_to_ggtt(cache);
	unsigned long offset;
	void *vaddr;

	if (cache->vaddr) {
		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
	} else {
		struct i915_vma *vma;
		int err;

		if (use_cpu_reloc(cache, obj))
			return NULL;

		err = i915_gem_object_set_to_gtt_domain(obj, true);
		if (err)
			return ERR_PTR(err);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE | PIN_NONBLOCK);
		if (IS_ERR(vma)) {
			memset(&cache->node, 0, sizeof(cache->node));
			err = drm_mm_insert_node_in_range
				(&ggtt->base.mm, &cache->node,
				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
				 0, ggtt->mappable_end,
				 DRM_MM_INSERT_LOW);
			if (err) /* no inactive aperture space, use cpu reloc */
				return NULL;
		} else {
			err = i915_vma_put_fence(vma);
			if (err) {
				i915_vma_unpin(vma);
				return ERR_PTR(err);
			}

			cache->node.start = vma->node.start;
			cache->node.mm = (void *)vma;
		}
	}

	offset = cache->node.start;
	if (cache->node.allocated) {
		wmb();
		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, page),
				       offset, I915_CACHE_NONE, 0);
	} else {
		offset += page << PAGE_SHIFT;
	}

	vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->mappable,
							 offset);
	cache->page = page;
	cache->vaddr = (unsigned long)vaddr;

	return vaddr;
}

static void *reloc_vaddr(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 unsigned long page)
{
	void *vaddr;

	if (cache->page == page) {
		vaddr = unmask_page(cache->vaddr);
	} else {
		vaddr = NULL;
		if ((cache->vaddr & KMAP) == 0)
			vaddr = reloc_iomap(obj, cache, page);
		if (!vaddr)
			vaddr = reloc_kmap(obj, cache, page);
	}

	return vaddr;
}

static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
{
	if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
		if (flushes & CLFLUSH_BEFORE) {
			clflushopt(addr);
			mb();
		}

		*addr = value;

		/*
		 * Writes to the same cacheline are serialised by the CPU
		 * (including clflush). On the write path, we only require
		 * that it hits memory in an orderly fashion and place
		 * mb barriers at the start and end of the relocation phase
		 * to ensure ordering of clflush wrt to the system.
		 */
		if (flushes & CLFLUSH_AFTER)
			clflushopt(addr);
	} else
		*addr = value;
}

static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
			     struct i915_vma *vma,
			     unsigned int len)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	struct drm_i915_gem_object *obj;
	struct drm_i915_gem_request *rq;
	struct i915_vma *batch;
	u32 *cmd;
	int err;

	GEM_BUG_ON(vma->obj->base.write_domain & I915_GEM_DOMAIN_CPU);

	obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map(obj,
				      cache->has_llc ? I915_MAP_WB : I915_MAP_WC);
	i915_gem_object_unpin_pages(obj);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	err = i915_gem_object_set_to_wc_domain(obj, false);
	if (err)
		goto err_unmap;

	batch = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_unmap;
	}

	err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
	if (err)
		goto err_unmap;

	rq = i915_gem_request_alloc(eb->engine, eb->ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = i915_gem_request_await_object(rq, vma->obj, true);
	if (err)
		goto err_request;

	err = eb->engine->emit_flush(rq, EMIT_INVALIDATE);
	if (err)
		goto err_request;

	err = i915_switch_context(rq);
	if (err)
		goto err_request;

	err = eb->engine->emit_bb_start(rq,
					batch->node.start, PAGE_SIZE,
					cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
	if (err)
		goto err_request;

	GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
	i915_vma_move_to_active(batch, rq, 0);
	reservation_object_lock(batch->resv, NULL);
	reservation_object_add_excl_fence(batch->resv, &rq->fence);
	reservation_object_unlock(batch->resv);
	i915_vma_unpin(batch);

	i915_vma_move_to_active(vma, rq, true);
	reservation_object_lock(vma->resv, NULL);
	reservation_object_add_excl_fence(vma->resv, &rq->fence);
	reservation_object_unlock(vma->resv);

	rq->batch = batch;

	cache->rq = rq;
	cache->rq_cmd = cmd;
	cache->rq_size = 0;

	/* Return with batch mapping (cmd) still pinned */
	return 0;

err_request:
	i915_add_request(rq);
err_unpin:
	i915_vma_unpin(batch);
err_unmap:
	i915_gem_object_unpin_map(obj);
	return err;
}

static u32 *reloc_gpu(struct i915_execbuffer *eb,
		      struct i915_vma *vma,
		      unsigned int len)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	u32 *cmd;

	if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1))
		reloc_gpu_flush(cache);

	if (unlikely(!cache->rq)) {
		int err;

		err = __reloc_gpu_alloc(eb, vma, len);
		if (unlikely(err))
			return ERR_PTR(err);
	}

	cmd = cache->rq_cmd + cache->rq_size;
	cache->rq_size += len;

	return cmd;
}

static u64
relocate_entry(struct i915_vma *vma,
	       const struct drm_i915_gem_relocation_entry *reloc,
	       struct i915_execbuffer *eb,
	       const struct i915_vma *target)
{
	u64 offset = reloc->offset;
	u64 target_offset = relocation_target(reloc, target);
	bool wide = eb->reloc_cache.use_64bit_reloc;
	void *vaddr;

	if (!eb->reloc_cache.vaddr &&
	    (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
	     !reservation_object_test_signaled_rcu(vma->resv, true))) {
		const unsigned int gen = eb->reloc_cache.gen;
		unsigned int len;
		u32 *batch;
		u64 addr;

		if (wide)
			len = offset & 7 ? 8 : 5;
		else if (gen >= 4)
			len = 4;
		else if (gen >= 3)
			len = 3;
		else /* On gen2 MI_STORE_DWORD_IMM uses a physical address */
			goto repeat;

		batch = reloc_gpu(eb, vma, len);
		if (IS_ERR(batch))
			goto repeat;

		addr = gen8_canonical_addr(vma->node.start + offset);
		if (wide) {
			if (offset & 7) {
				*batch++ = MI_STORE_DWORD_IMM_GEN4;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = lower_32_bits(target_offset);

				addr = gen8_canonical_addr(addr + 4);

				*batch++ = MI_STORE_DWORD_IMM_GEN4;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = upper_32_bits(target_offset);
			} else {
				*batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = lower_32_bits(target_offset);
				*batch++ = upper_32_bits(target_offset);
			}
		} else if (gen >= 6) {
			*batch++ = MI_STORE_DWORD_IMM_GEN4;
			*batch++ = 0;
			*batch++ = addr;
			*batch++ = target_offset;
		} else if (gen >= 4) {
			*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
			*batch++ = 0;
			*batch++ = addr;
			*batch++ = target_offset;
		} else {
			*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*batch++ = addr;
			*batch++ = target_offset;
		}

		goto out;
	}

repeat:
	vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	clflush_write32(vaddr + offset_in_page(offset),
			lower_32_bits(target_offset),
			eb->reloc_cache.vaddr);

	if (wide) {
		offset += sizeof(u32);
		target_offset >>= 32;
		wide = false;
		goto repeat;
	}

out:
	return target->node.start | UPDATE;
}
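
/*
 * Worked example: a 64-bit relocation at a qword-aligned offset is written
 * with a single MI_STORE_DWORD_IMM_GEN4 carrying both dwords; if the offset
 * straddles a qword boundary (offset & 7), it is split into two dword
 * writes, re-canonicalising the address for the upper half. The CPU path
 * performs the same split via the repeat: label, writing the low dword and
 * then the high dword at offset + 4.
 */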

static u64
eb_relocate_entry(struct i915_execbuffer *eb,
		  struct i915_vma *vma,
		  const struct drm_i915_gem_relocation_entry *reloc)
{
	struct i915_vma *target;
	int err;

	/* we already hold a reference to all valid objects */
	target = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(!target))
		return -ENOENT;

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	if (reloc->write_domain) {
		target->exec_entry->flags |= EXEC_OBJECT_WRITE;

		/*
		 * Sandybridge PPGTT errata: We need a global gtt mapping
		 * for MI and pipe_control writes because the gpu doesn't
		 * properly redirect them through the ppgtt for non_secure
		 * batchbuffers.
		 */
		if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
		    IS_GEN6(eb->i915)) {
			err = i915_vma_bind(target, target->obj->cache_level,
					    PIN_GLOBAL);
			if (WARN_ONCE(err,
				      "Unexpected failure to bind target VMA!"))
				return err;
		}
	}

	/*
	 * If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (!DBG_FORCE_RELOC &&
	    gen8_canonical_addr(target->node.start) == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		     vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "target %d offset %d size %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset,
			  (int)vma->size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "target %d offset %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset);
		return -EINVAL;
	}

	/*
	 * If we write into the object, we need to force the synchronisation
	 * barrier, either with an asynchronous clflush or if we executed the
	 * patching using the GPU (though that should be serialised by the
	 * timeline). To be completely sure, and since we are required to
	 * do relocations we are already stalling, disable the user's opt
	 * out of our synchronisation.
	 */
	vma->exec_entry->flags &= ~EXEC_OBJECT_ASYNC;

	/* and update the user's relocation entry */
	return relocate_entry(vma, reloc, eb, target);
}

static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *urelocs;
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	unsigned int remain;

	urelocs = u64_to_user_ptr(entry->relocs_ptr);
	remain = entry->relocation_count;
	if (unlikely(remain > N_RELOC(ULONG_MAX)))
		return -EINVAL;

	/*
	 * We must check that the entire relocation array is safe
	 * to read. However, if the array is not writable the user loses
	 * the updated relocation values.
	 */
	if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs))))
		return -EFAULT;

	do {
		struct drm_i915_gem_relocation_entry *r = stack;
		unsigned int count =
			min_t(unsigned int, remain, ARRAY_SIZE(stack));
		unsigned int copied;

		/*
		 * This is the fast path and we cannot handle a pagefault
		 * whilst holding the struct mutex lest the user pass in the
		 * relocations contained within a mmaped bo. In such a case,
		 * the page fault handler would call i915_gem_fault() and
		 * we would try to acquire the struct mutex again. Obviously
		 * this is bad and so lockdep complains vehemently.
		 */
		pagefault_disable();
		copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
		pagefault_enable();
		if (unlikely(copied)) {
			remain = -EFAULT;
			goto out;
		}

		remain -= count;
		do {
			u64 offset = eb_relocate_entry(eb, vma, r);

			if (likely(offset == 0)) {
			} else if ((s64)offset < 0) {
				remain = (int)offset;
				goto out;
			} else {
				/*
				 * Note that reporting an error now
				 * leaves everything in an inconsistent
				 * state as we have *already* changed
				 * the relocation value inside the
				 * object. As we have not changed the
				 * reloc.presumed_offset and will not
				 * change the execobject.offset, on the
				 * next call we may not rewrite the value
				 * inside the object, leaving it
				 * dangling and causing a GPU hang. Unless
				 * userspace dynamically rebuilds the
				 * relocations on each execbuf rather than
				 * presume a static tree.
				 *
				 * We did previously check if the relocations
				 * were writable (access_ok), an error now
				 * would be a strange race with mprotect,
				 * having already demonstrated that we
				 * can read from this userspace address.
				 */
				offset = gen8_canonical_addr(offset & ~UPDATE);
				__put_user(offset,
					   &urelocs[r-stack].presumed_offset);
			}
		} while (r++, --count);
		urelocs += ARRAY_SIZE(stack);
	} while (remain);
out:
	reloc_cache_reset(&eb->reloc_cache);
	return remain;
}
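
/*
 * For illustration: struct drm_i915_gem_relocation_entry is 32 bytes, so
 * N_RELOC(512) == 16 and the fast path above copies the user's relocation
 * array in 512-byte, 16-entry chunks onto the stack, each chunk processed
 * between pagefault_disable() and pagefault_enable().
 */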

static int
eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	struct drm_i915_gem_relocation_entry *relocs =
		u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
	unsigned int i;
	int err;

	for (i = 0; i < entry->relocation_count; i++) {
		u64 offset = eb_relocate_entry(eb, vma, &relocs[i]);

		if ((s64)offset < 0) {
			err = (int)offset;
			goto err;
		}
	}
	err = 0;
err:
	reloc_cache_reset(&eb->reloc_cache);
	return err;
}

static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
{
	const char __user *addr, *end;
	unsigned long size;
	char __maybe_unused c;

	size = entry->relocation_count;
	if (size == 0)
		return 0;

	if (size > N_RELOC(ULONG_MAX))
		return -EINVAL;

	addr = u64_to_user_ptr(entry->relocs_ptr);
	size *= sizeof(struct drm_i915_gem_relocation_entry);
	if (!access_ok(VERIFY_READ, addr, size))
		return -EFAULT;

	end = addr + size;
	for (; addr < end; addr += PAGE_SIZE) {
		int err = __get_user(c, addr);
		if (err)
			return err;
	}
	return __get_user(c, end - 1);
}

static int eb_copy_relocations(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		const unsigned int nreloc = eb->exec[i].relocation_count;
		struct drm_i915_gem_relocation_entry __user *urelocs;
		struct drm_i915_gem_relocation_entry *relocs;
		unsigned long size;
		unsigned long copied;

		if (nreloc == 0)
			continue;

		err = check_relocations(&eb->exec[i]);
		if (err)
			goto err;

		urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
		size = nreloc * sizeof(*relocs);

		relocs = kvmalloc_array(size, 1, GFP_TEMPORARY);
		if (!relocs) {
			err = -ENOMEM;
			goto err;
		}

		/* copy_from_user is limited to < 4GiB */
		copied = 0;
		do {
			unsigned int len =
				min_t(u64, BIT_ULL(31), size - copied);

			if (__copy_from_user((char *)relocs + copied,
					     (char *)urelocs + copied,
					     len)) {
				kvfree(relocs);
				err = -EFAULT;
				goto err;
			}

			copied += len;
		} while (copied < size);

		/*
		 * As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		user_access_begin();
		for (copied = 0; copied < nreloc; copied++)
			unsafe_put_user(-1,
					&urelocs[copied].presumed_offset,
					end_user);
end_user:
		user_access_end();

		eb->exec[i].relocs_ptr = (uintptr_t)relocs;
	}

	return 0;

err:
	while (i--) {
		struct drm_i915_gem_relocation_entry *relocs =
			u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
		if (eb->exec[i].relocation_count)
			kvfree(relocs);
	}
	return err;
}
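
/*
 * For illustration: the copy above is chunked at BIT_ULL(31) (2GiB) because
 * copy_from_user() is limited to less than 4GiB per call; a pathological
 * 100-million-entry array (~3.2GB at 32 bytes each) would thus be copied
 * in two passes.
 */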

static int eb_prefault_relocations(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;

	if (unlikely(i915.prefault_disable))
		return 0;

	for (i = 0; i < count; i++) {
		int err;

		err = check_relocations(&eb->exec[i]);
		if (err)
			return err;
	}

	return 0;
}

1670
static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
1671
{
1672
	struct drm_device *dev = &eb->i915->drm;
1673
	bool have_copy = false;
1674
	struct i915_vma *vma;
1675 1676 1677 1678 1679 1680 1681
	int err = 0;

repeat:
	if (signal_pending(current)) {
		err = -ERESTARTSYS;
		goto out;
	}
1682

1683
	/* We may process another execbuffer during the unlock... */
1684
	eb_reset_vmas(eb);
1685 1686
	mutex_unlock(&dev->struct_mutex);

1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707
	/*
	 * We take 3 passes through the slowpatch.
	 *
	 * 1 - we try to just prefault all the user relocation entries and
	 * then attempt to reuse the atomic pagefault disabled fast path again.
	 *
	 * 2 - we copy the user entries to a local buffer here outside of the
	 * local and allow ourselves to wait upon any rendering before
	 * relocations
	 *
	 * 3 - we already have a local copy of the relocation entries, but
	 * were interrupted (EAGAIN) whilst waiting for the objects, try again.
	 */
	if (!err) {
		err = eb_prefault_relocations(eb);
	} else if (!have_copy) {
		err = eb_copy_relocations(eb);
		have_copy = err == 0;
	} else {
		cond_resched();
		err = 0;
1708
	}
1709 1710 1711
	if (err) {
		mutex_lock(&dev->struct_mutex);
		goto out;
1712 1713
	}

1714 1715 1716
	/* A frequent cause for EAGAIN are currently unavailable client pages */
	flush_workqueue(eb->i915->mm.userptr_wq);

1717 1718
	err = i915_mutex_lock_interruptible(dev);
	if (err) {
1719
		mutex_lock(&dev->struct_mutex);
1720
		goto out;
1721 1722
	}

	/* reacquire the objects */
	err = eb_lookup_vmas(eb);
	if (err)
		goto err;

	list_for_each_entry(vma, &eb->relocs, reloc_link) {
		if (!have_copy) {
			pagefault_disable();
			err = eb_relocate_vma(eb, vma);
			pagefault_enable();
			if (err)
				goto repeat;
		} else {
			err = eb_relocate_vma_slow(eb, vma);
			if (err)
				goto err;
		}
	}

	/*
	 * Leave the user relocations as they are: this is the painfully slow
	 * path, and we want to avoid the complication of dropping the lock
	 * whilst having buffers reserved in the aperture and so causing
	 * spurious ENOSPC for random operations.
	 */

err:
	if (err == -EAGAIN)
		goto repeat;

out:
	if (have_copy) {
		const unsigned int count = eb->buffer_count;
		unsigned int i;

		for (i = 0; i < count; i++) {
			const struct drm_i915_gem_exec_object2 *entry =
				&eb->exec[i];
			struct drm_i915_gem_relocation_entry *relocs;

			if (!entry->relocation_count)
				continue;

			relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
			kvfree(relocs);
		}
	}

	return err ?: have_copy;
}

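/*
 * Fastpath: look up all the objects and, while still holding
 * struct_mutex, apply any outstanding relocations inline; the first
 * complication diverts us to eb_relocate_slow().
 */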
static int eb_relocate(struct i915_execbuffer *eb)
{
	if (eb_lookup_vmas(eb))
		goto slow;

	/* The objects are in their final locations, apply the relocations. */
	if (eb->args->flags & __EXEC_HAS_RELOC) {
		struct i915_vma *vma;

		list_for_each_entry(vma, &eb->relocs, reloc_link) {
			if (eb_relocate_vma(eb, vma))
				goto slow;
		}
	}

	return 0;

slow:
	return eb_relocate_slow(eb);
}

static void eb_export_fence(struct i915_vma *vma,
			    struct drm_i915_gem_request *req,
			    unsigned int flags)
{
	struct reservation_object *resv = vma->resv;

	/*
	 * Ignore errors from failing to allocate the new fence, we can't
	 * handle an error right now. Worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
	reservation_object_lock(resv, NULL);
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &req->fence);
	else if (reservation_object_reserve_shared(resv) == 0)
		reservation_object_add_shared_fence(resv, &req->fence);
	reservation_object_unlock(resv);
}

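/*
 * Flush the objects into the GPU domain: record any error-capture
 * requests, flush stale CPU cachelines, queue the waits upon prior
 * rendering and mark every vma as active on this request, before
 * flushing the chipset caches and invalidating the GPU caches and TLBs.
 */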
static int eb_move_to_gpu(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		const struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
		struct i915_vma *vma = exec_to_vma(entry);
		struct drm_i915_gem_object *obj = vma->obj;

		if (entry->flags & EXEC_OBJECT_CAPTURE) {
			struct i915_gem_capture_list *capture;

			capture = kmalloc(sizeof(*capture), GFP_KERNEL);
			if (unlikely(!capture))
				return -ENOMEM;

			capture->next = eb->request->capture_list;
			capture->vma = vma;
			eb->request->capture_list = capture;
		}

		if (entry->flags & EXEC_OBJECT_ASYNC)
			goto skip_flushes;

		if (unlikely(obj->cache_dirty && !obj->cache_coherent))
			i915_gem_clflush_object(obj, 0);

		err = i915_gem_request_await_object
			(eb->request, obj, entry->flags & EXEC_OBJECT_WRITE);
		if (err)
			return err;

skip_flushes:
		i915_vma_move_to_active(vma, eb->request, entry->flags);
		__eb_unreserve_vma(vma, entry);
		vma->exec_entry = NULL;
	}

	for (i = 0; i < count; i++) {
		const struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
		struct i915_vma *vma = exec_to_vma(entry);

		eb_export_fence(vma, eb->request, entry->flags);
		if (unlikely(entry->flags & __EXEC_OBJECT_HAS_REF))
			i915_vma_put(vma);
	}
	eb->exec = NULL;

	/* Unconditionally flush any chipset caches (for streaming writes). */
	i915_gem_chipset_flush(eb->i915);

	/* Unconditionally invalidate GPU caches and TLBs. */
	return eb->engine->emit_flush(eb->request, EMIT_INVALIDATE);
}

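/* Validate the user supplied execbuffer2 parameters before doing any work. */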
static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
		return false;

	/* Kernel clipping was a DRI1 misfeature */
	if (exec->num_cliprects || exec->cliprects_ptr)
		return false;

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return false;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return false;

	return true;
}

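/*
 * Mark the vma (and its backing object) as busy on this request:
 * queue it upon the engine's active lists, update the read/write
 * domains and, if needed, track the fence register usage.
 */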
void i915_vma_move_to_active(struct i915_vma *vma,
			     struct drm_i915_gem_request *req,
			     unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	const unsigned int idx = req->engine->id;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	/*
	 * Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we
	 * add the active reference first and queue for it to be dropped
	 * *last*.
	 */
	if (!i915_vma_is_active(vma))
		obj->active_count++;
	i915_vma_set_active(vma, idx);
	i915_gem_active_set(&vma->last_read[idx], req);
	list_move_tail(&vma->vm_link, &vma->vm->active_list);

	obj->base.write_domain = 0;
	if (flags & EXEC_OBJECT_WRITE) {
		obj->base.write_domain = I915_GEM_DOMAIN_RENDER;

		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
			i915_gem_active_set(&obj->frontbuffer_write, req);

		obj->base.read_domains = 0;
	}
	obj->base.read_domains |= I915_GEM_GPU_DOMAINS;

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		i915_gem_active_set(&vma->last_fence, req);
}

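/*
 * Zero the four gen7 streamout (SOL) write-offset registers via a
 * single MI_LOAD_REGISTER_IMM; the four register/value pairs plus
 * MI_NOOP account for the intel_ring_begin(req, 4 * 2 + 2) request.
 */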
static int i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
{
	u32 *cs;
	int i;

	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	cs = intel_ring_begin(req, 4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(4);
	for (i = 0; i < 4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
		*cs++ = 0;
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

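/*
 * Copy the batch through the command parser into a shadow bo taken
 * from the engine's batch pool; if accepted, the shadow is appended
 * to the exec list so that it is unreserved and released with the
 * other objects.
 */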
static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
{
	struct drm_i915_gem_object *shadow_batch_obj;
	struct i915_vma *vma;
	int err;

	shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
						   PAGE_ALIGN(eb->batch_len));
	if (IS_ERR(shadow_batch_obj))
		return ERR_CAST(shadow_batch_obj);

	err = intel_engine_cmd_parser(eb->engine,
				      eb->batch->obj,
				      shadow_batch_obj,
				      eb->batch_start_offset,
				      eb->batch_len,
				      is_master);
	if (err) {
		if (err == -EACCES) /* unhandled chained batch */
			vma = NULL;
		else
			vma = ERR_PTR(err);
		goto out;
	}

	vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		goto out;

	vma->exec_entry =
		memset(&eb->exec[eb->buffer_count++],
		       0, sizeof(*vma->exec_entry));
	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
	__exec_to_vma(vma->exec_entry) = (uintptr_t)i915_vma_get(vma);

out:
	i915_gem_object_unpin_pages(shadow_batch_obj);
	return vma;
}

static void
add_to_client(struct drm_i915_gem_request *req, struct drm_file *file)
{
	req->file_priv = file->driver_priv;
	list_add_tail(&req->client_link, &req->file_priv->mm.request_list);
}

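/*
 * Emit the execbuf into the request: flush the objects to the GPU,
 * switch to the target context and start the batch from its final
 * GTT address with the accumulated dispatch flags.
 */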
static int eb_submit(struct i915_execbuffer *eb)
{
	int err;

	err = eb_move_to_gpu(eb);
	if (err)
		return err;

	err = i915_switch_context(eb->request);
	if (err)
		return err;

	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
		err = i915_reset_gen7_sol_offsets(eb->request);
		if (err)
			return err;
	}

	err = eb->engine->emit_bb_start(eb->request,
					eb->batch->node.start +
					eb->batch_start_offset,
					eb->batch_len,
					eb->batch_flags);
	if (err)
		return err;

	return 0;
}

/**
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The engine index is returned.
 */
static unsigned int
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
			 struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_engine < 0)
		file_priv->bsd_engine = atomic_fetch_xor(1,
			 &dev_priv->mm.bsd_engine_dispatch_index);

	return file_priv->bsd_engine;
}

#define I915_USER_RINGS (4)

static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
	[I915_EXEC_DEFAULT]	= RCS,
	[I915_EXEC_RENDER]	= RCS,
	[I915_EXEC_BLT]		= BCS,
	[I915_EXEC_BSD]		= VCS,
	[I915_EXEC_VEBOX]	= VECS
};

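/*
 * Translate the ring selector in the execbuffer flags into an engine,
 * including the explicit BSD1/BSD2 selection (or the automatic
 * ping-pong dispatch) on machines with two video decode engines.
 */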
static struct intel_engine_cs *
eb_select_engine(struct drm_i915_private *dev_priv,
		 struct drm_file *file,
		 struct drm_i915_gem_execbuffer2 *args)
{
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
	struct intel_engine_cs *engine;

	if (user_ring_id > I915_USER_RINGS) {
		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
		return NULL;
	}

	if ((user_ring_id != I915_EXEC_BSD) &&
	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
		DRM_DEBUG("execbuf with non bsd ring but with invalid "
			  "bsd dispatch flags: %d\n", (int)(args->flags));
		return NULL;
	}

	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
			bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
			bsd_idx--;
		} else {
			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
				  bsd_idx);
			return NULL;
		}

		engine = dev_priv->engine[_VCS(bsd_idx)];
	} else {
		engine = dev_priv->engine[user_ring_map[user_ring_id]];
	}

	if (!engine) {
		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
		return NULL;
	}

	return engine;
}

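/*
 * The heart of execbuf: look up, pin and relocate every object, vet
 * (and optionally parse) the batch, build a request and hand it all
 * to the engine for execution.
 */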
static int
i915_gem_do_execbuffer(struct drm_device *dev,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	struct i915_execbuffer eb;
	struct dma_fence *in_fence = NULL;
	struct sync_file *out_fence = NULL;
	int out_fence_fd = -1;
	int err;

	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);

	eb.i915 = to_i915(dev);
	eb.file = file;
	eb.args = args;
	if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
		args->flags |= __EXEC_HAS_RELOC;
	eb.exec = exec;
	eb.ctx = NULL;
	eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	if (USES_FULL_PPGTT(eb.i915))
		eb.invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
	reloc_cache_init(&eb.reloc_cache, eb.i915);

	eb.buffer_count = args->buffer_count;
	eb.batch_start_offset = args->batch_start_offset;
	eb.batch_len = args->batch_len;

	eb.batch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		eb.batch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		eb.batch_flags |= I915_DISPATCH_PINNED;

	eb.engine = eb_select_engine(eb.i915, file, args);
	if (!eb.engine)
		return -EINVAL;

	if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
		if (!HAS_RESOURCE_STREAMER(eb.i915)) {
			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
			return -EINVAL;
		}
		if (eb.engine->id != RCS) {
			DRM_DEBUG("RS is not available on %s\n",
				  eb.engine->name);
			return -EINVAL;
		}

		eb.batch_flags |= I915_DISPATCH_RS;
	}

	if (args->flags & I915_EXEC_FENCE_IN) {
		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!in_fence)
			return -EINVAL;
	}

	if (args->flags & I915_EXEC_FENCE_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			err = out_fence_fd;
			goto err_in_fence;
		}
	}

	if (eb_create(&eb)) {
		err = -ENOMEM;
		goto err_out_fence;
	}

	/*
	 * Take a local wakeref for preparing to dispatch the execbuf as
	 * we expect to access the hardware fairly frequently in the
	 * process. Upon first dispatch, we acquire another prolonged
	 * wakeref that we hold until the GPU has been idle for at least
	 * 100ms.
	 */
	intel_runtime_pm_get(eb.i915);
	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto err_rpm;

	err = eb_select_context(&eb);
	if (unlikely(err))
		goto err_unlock;

	err = eb_relocate(&eb);
	if (err)
		/*
		 * If the user expects the execobject.offset and
		 * reloc.presumed_offset to be an exact match,
		 * as for using NO_RELOC, then we cannot update
		 * the execobject.offset until we have completed
		 * relocation.
		 */
		args->flags &= ~__EXEC_HAS_RELOC;
	if (err < 0)
		goto err_vma;

	if (unlikely(eb.batch->exec_entry->flags & EXEC_OBJECT_WRITE)) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		err = -EINVAL;
		goto err_vma;
	}
	if (eb.batch_start_offset > eb.batch->size ||
	    eb.batch_len > eb.batch->size - eb.batch_start_offset) {
		DRM_DEBUG("Attempting to use out-of-bounds batch\n");
		err = -EINVAL;
		goto err_vma;
	}

	if (eb.engine->needs_cmd_parser && eb.batch_len) {
		struct i915_vma *vma;

		vma = eb_parse(&eb, drm_is_current_master(file));
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_vma;
		}

		if (vma) {
			/*
			 * Batch parsed and accepted:
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
			 * bit from MI_BATCH_BUFFER_START commands issued in
			 * the dispatch_execbuffer implementations. We
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
			eb.batch_flags |= I915_DISPATCH_SECURE;
			eb.batch_start_offset = 0;
			eb.batch = vma;
		}
	}

	if (eb.batch_len == 0)
		eb.batch_len = eb.batch->size - eb.batch_start_offset;

	/*
	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again.
	 */
	if (eb.batch_flags & I915_DISPATCH_SECURE) {
		struct i915_vma *vma;

		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_vma;
		}

		eb.batch = vma;
	}

	/* All GPU relocation batches must be submitted prior to the user rq */
	GEM_BUG_ON(eb.reloc_cache.rq);

	/* Allocate a request for this batch buffer nice and early. */
	eb.request = i915_gem_request_alloc(eb.engine, eb.ctx);
	if (IS_ERR(eb.request)) {
		err = PTR_ERR(eb.request);
		goto err_batch_unpin;
	}

	if (in_fence) {
		err = i915_gem_request_await_dma_fence(eb.request, in_fence);
		if (err < 0)
			goto err_request;
	}

	if (out_fence_fd != -1) {
		out_fence = sync_file_create(&eb.request->fence);
		if (!out_fence) {
			err = -ENOMEM;
			goto err_request;
		}
	}

	/*
	 * Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	eb.request->batch = eb.batch;

	trace_i915_gem_request_queue(eb.request, eb.batch_flags);
	err = eb_submit(&eb);
err_request:
	__i915_add_request(eb.request, err == 0);
	add_to_client(eb.request, file);

	if (out_fence) {
		if (err == 0) {
			fd_install(out_fence_fd, out_fence->file);
			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
			args->rsvd2 |= (u64)out_fence_fd << 32;
			out_fence_fd = -1;
		} else {
			fput(out_fence->file);
		}
	}

err_batch_unpin:
	if (eb.batch_flags & I915_DISPATCH_SECURE)
		i915_vma_unpin(eb.batch);
err_vma:
	if (eb.exec)
		eb_release_vmas(&eb);
	i915_gem_context_put(eb.ctx);
err_unlock:
	mutex_unlock(&dev->struct_mutex);
err_rpm:
	intel_runtime_pm_put(eb.i915);
	eb_destroy(&eb);
err_out_fence:
	if (out_fence_fd != -1)
		put_unused_fd(out_fence_fd);
err_in_fence:
	dma_fence_put(in_fence);
	return err;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	const size_t sz = sizeof(struct drm_i915_gem_exec_object2);
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	unsigned int i;
	int err;

	if (args->buffer_count < 1 || args->buffer_count > SIZE_MAX / sz - 1) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	if (!i915_gem_check_execbuffer(&exec2))
		return -EINVAL;

	/* Copy in the exec list from userland */
	exec_list = kvmalloc_array(args->buffer_count, sizeof(*exec_list),
				   __GFP_NOWARN | GFP_TEMPORARY);
	exec2_list = kvmalloc_array(args->buffer_count + 1, sz,
				    __GFP_NOWARN | GFP_TEMPORARY);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -ENOMEM;
	}
	err = copy_from_user(exec_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (err) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, err);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_GEN(to_i915(dev)) < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list);
	if (exec2.flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
			exec2_list[i].offset &= PIN_OFFSET_MASK;
			if (__copy_to_user(&user_exec_list[i].offset,
					   &exec2_list[i].offset,
					   sizeof(user_exec_list[i].offset)))
				break;
		}
	}

	kvfree(exec_list);
	kvfree(exec2_list);
	return err;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	const size_t sz = sizeof(struct drm_i915_gem_exec_object2);
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list;
	int err;

	if (args->buffer_count < 1 || args->buffer_count > SIZE_MAX / sz - 1) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	/* Allocate an extra slot for use by the command parser */
	exec2_list = kvmalloc_array(args->buffer_count + 1, sz,
				    __GFP_NOWARN | GFP_TEMPORARY);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	if (copy_from_user(exec2_list,
			   u64_to_user_ptr(args->buffers_ptr),
			   sizeof(*exec2_list) * args->buffer_count)) {
		DRM_DEBUG("copy %d exec entries failed\n", args->buffer_count);
		kvfree(exec2_list);
		return -EFAULT;
	}

	err = i915_gem_do_execbuffer(dev, file, args, exec2_list);

	/*
	 * Now that we have begun execution of the batchbuffer, we ignore
	 * any new error after this point. Also given that we have already
	 * updated the associated relocations, we try to write out the current
	 * object locations irrespective of any error.
	 */
	if (args->flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);
		unsigned int i;

		/* Copy the new buffer offsets back to the user's exec list. */
		user_access_begin();
		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
			unsafe_put_user(exec2_list[i].offset,
					&user_exec_list[i].offset,
					end_user);
		}
end_user:
		user_access_end();
	}

	args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
	kvfree(exec2_list);
	return err;
}