/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <linux/dma_remapping.h>
#include <linux/reservation.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define DBG_USE_CPU_RELOC 0 /* -1 force GTT relocs; 1 force CPU relocs */

#define  __EXEC_OBJECT_HAS_PIN		(1<<31)
#define  __EXEC_OBJECT_HAS_FENCE	(1<<30)
#define  __EXEC_OBJECT_NEEDS_MAP	(1<<29)
#define  __EXEC_OBJECT_NEEDS_BIAS	(1<<28)
#define  __EXEC_OBJECT_INTERNAL_FLAGS (0xf<<28) /* all of the above */

#define BATCH_OFFSET_BIAS (256*1024)

#define __I915_EXEC_ILLEGAL_FLAGS \
	(__I915_EXEC_UNKNOWN_FLAGS | I915_EXEC_CONSTANTS_MASK)

struct i915_execbuffer {
	struct drm_i915_private *i915;
	struct drm_file *file;
	struct drm_i915_gem_execbuffer2 *args;
	struct drm_i915_gem_exec_object2 *exec;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	struct i915_address_space *vm;
	struct i915_vma *batch;
	struct drm_i915_gem_request *request;
	u32 batch_start_offset;
	u32 batch_len;
	unsigned int dispatch_flags;
	struct drm_i915_gem_exec_object2 shadow_exec_entry;
	bool need_relocs;
	struct list_head vmas;
	struct reloc_cache {
		struct drm_mm_node node;
		unsigned long vaddr;
		unsigned int page;
		bool use_64bit_reloc : 1;
	} reloc_cache;
	int lut_mask;
	struct hlist_head *buckets;
};

/*
 * As an alternative to creating a hashtable of handle-to-vma for a batch,
 * we use the last available reserved field in the execobject[] and stash
 * a link from the execobj to its vma.
 */
#define __exec_to_vma(ee) (ee)->rsvd2
#define exec_to_vma(ee) u64_to_ptr(struct i915_vma, __exec_to_vma(ee))

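/*
 * Set up the handle->vma lookup used while building the execbuf. Without
 * I915_EXEC_HANDLE_LUT we allocate a hash table sized from the buffer
 * count (falling back to a single bucket under memory pressure) and store
 * its size in lut_mask; with the flag, handles are direct indices into
 * the execobject array and lut_mask holds the negated buffer count.
 */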
static int eb_create(struct i915_execbuffer *eb)
{
	if ((eb->args->flags & I915_EXEC_HANDLE_LUT) == 0) {
		unsigned int size = 1 + ilog2(eb->args->buffer_count);

		do {
			eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
					      GFP_TEMPORARY |
					      __GFP_NORETRY |
					      __GFP_NOWARN);
			if (eb->buckets)
				break;
		} while (--size);

		if (unlikely(!eb->buckets)) {
			eb->buckets = kzalloc(sizeof(struct hlist_head),
					      GFP_TEMPORARY);
			if (unlikely(!eb->buckets))
				return -ENOMEM;
		}

		eb->lut_mask = size;
	} else {
		eb->lut_mask = -eb->args->buffer_count;
	}

	return 0;
}

static inline void
__eb_unreserve_vma(struct i915_vma *vma,
		   const struct drm_i915_gem_exec_object2 *entry)
{
	if (unlikely(entry->flags & __EXEC_OBJECT_HAS_FENCE))
		i915_vma_unpin_fence(vma);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		__i915_vma_unpin(vma);
}

static void
eb_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	__eb_unreserve_vma(vma, entry);
	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void
eb_reset(struct i915_execbuffer *eb)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &eb->vmas, exec_link) {
		eb_unreserve_vma(vma);
		i915_vma_put(vma);
		vma->exec_entry = NULL;
	}

	if (eb->lut_mask >= 0)
		memset(eb->buckets, 0,
		       sizeof(struct hlist_head) << eb->lut_mask);
}

static bool
eb_add_vma(struct i915_execbuffer *eb, struct i915_vma *vma, int i)
{
	if (unlikely(vma->exec_entry)) {
		DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n",
			  eb->exec[i].handle, i);
		return false;
	}
	list_add_tail(&vma->exec_link, &eb->vmas);

	vma->exec_entry = &eb->exec[i];
	if (eb->lut_mask >= 0) {
		vma->exec_handle = eb->exec[i].handle;
		hlist_add_head(&vma->exec_node,
			       &eb->buckets[hash_32(vma->exec_handle,
						    eb->lut_mask)]);
	}

	i915_vma_get(vma);
	__exec_to_vma(&eb->exec[i]) = (uintptr_t)vma;
	return true;
}

static inline struct hlist_head *
ht_head(const struct i915_gem_context *ctx, u32 handle)
{
	return &ctx->vma_lut.ht[hash_32(handle, ctx->vma_lut.ht_bits)];
}

static inline bool
ht_needs_resize(const struct i915_gem_context *ctx)
{
	return (4*ctx->vma_lut.ht_count > 3*ctx->vma_lut.ht_size ||
		4*ctx->vma_lut.ht_count + 1 < ctx->vma_lut.ht_size);
}

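/*
 * Resolve every execobject handle into a vma. The fast path walks the
 * per-context handle->vma hash table; handles that miss are resolved in a
 * slow pass that looks up the GEM object under the file's table_lock and
 * then finds or creates a vma for this address space.
 */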
static int
eb_lookup_vmas(struct i915_execbuffer *eb)
{
#define INTERMEDIATE BIT(0)
	const int count = eb->args->buffer_count;
	struct i915_vma *vma;
	int slow_pass = -1;
	int i;

	INIT_LIST_HEAD(&eb->vmas);

	if (unlikely(eb->ctx->vma_lut.ht_size & I915_CTX_RESIZE_IN_PROGRESS))
		flush_work(&eb->ctx->vma_lut.resize);
	GEM_BUG_ON(eb->ctx->vma_lut.ht_size & I915_CTX_RESIZE_IN_PROGRESS);

	for (i = 0; i < count; i++) {
		__exec_to_vma(&eb->exec[i]) = 0;

		hlist_for_each_entry(vma,
				     ht_head(eb->ctx, eb->exec[i].handle),
				     ctx_node) {
			if (vma->ctx_handle != eb->exec[i].handle)
				continue;

			if (!eb_add_vma(eb, vma, i))
				return -EINVAL;

			goto next_vma;
		}

		if (slow_pass < 0)
			slow_pass = i;
next_vma: ;
	}

	if (slow_pass < 0)
		return 0;

	spin_lock(&eb->file->table_lock);
	/* Grab a reference to the object and release the lock so we can lookup
	 * or create the VMA without using GFP_ATOMIC */
	for (i = slow_pass; i < count; i++) {
		struct drm_i915_gem_object *obj;

		if (__exec_to_vma(&eb->exec[i]))
			continue;

		obj = to_intel_bo(idr_find(&eb->file->object_idr,
					   eb->exec[i].handle));
		if (unlikely(!obj)) {
			spin_unlock(&eb->file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				  eb->exec[i].handle, i);
			return -ENOENT;
		}

		__exec_to_vma(&eb->exec[i]) = INTERMEDIATE | (uintptr_t)obj;
	}
	spin_unlock(&eb->file->table_lock);

	for (i = slow_pass; i < count; i++) {
		struct drm_i915_gem_object *obj;

		if ((__exec_to_vma(&eb->exec[i]) & INTERMEDIATE) == 0)
			continue;

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		obj = u64_to_ptr(struct drm_i915_gem_object,
				 __exec_to_vma(&eb->exec[i]) & ~INTERMEDIATE);
		vma = i915_vma_instance(obj, eb->vm, NULL);
		if (unlikely(IS_ERR(vma))) {
			DRM_DEBUG("Failed to lookup VMA\n");
			return PTR_ERR(vma);
		}

		/* First come, first served */
		if (!vma->ctx) {
			vma->ctx = eb->ctx;
			vma->ctx_handle = eb->exec[i].handle;
			hlist_add_head(&vma->ctx_node,
				       ht_head(eb->ctx, eb->exec[i].handle));
			eb->ctx->vma_lut.ht_count++;
			if (i915_vma_is_ggtt(vma)) {
				GEM_BUG_ON(obj->vma_hashed);
				obj->vma_hashed = vma;
			}
		}

		if (!eb_add_vma(eb, vma, i))
			return -EINVAL;
	}

	if (ht_needs_resize(eb->ctx)) {
		eb->ctx->vma_lut.ht_size |= I915_CTX_RESIZE_IN_PROGRESS;
		queue_work(system_highpri_wq, &eb->ctx->vma_lut.resize);
	}

	return 0;
#undef INTERMEDIATE
}

299 300 301 302 303
static struct i915_vma *
eb_get_batch(struct i915_execbuffer *eb)
{
	struct i915_vma *vma =
		exec_to_vma(&eb->exec[eb->args->buffer_count - 1]);
304

305
	/*
306 307 308 309 310 311 312
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
313
	 */
314 315
	if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
		vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
316

317
	return vma;
318 319
}

320 321
static struct i915_vma *
eb_get_vma(struct i915_execbuffer *eb, unsigned long handle)
322
{
323 324
	if (eb->lut_mask < 0) {
		if (handle >= -eb->lut_mask)
325
			return NULL;
326
		return exec_to_vma(&eb->exec[handle]);
327 328
	} else {
		struct hlist_head *head;
329
		struct i915_vma *vma;
330

331
		head = &eb->buckets[hash_32(handle, eb->lut_mask)];
332
		hlist_for_each_entry(vma, head, exec_node) {
333 334
			if (vma->exec_handle == handle)
				return vma;
335 336 337
		}
		return NULL;
	}
338 339
}

340
static void eb_destroy(struct i915_execbuffer *eb)
341
{
342
	struct i915_vma *vma;
343

344
	list_for_each_entry(vma, &eb->vmas, exec_link) {
345 346
		if (!vma->exec_entry)
			continue;
347

348
		__eb_unreserve_vma(vma, vma->exec_entry);
349
		vma->exec_entry = NULL;
350
		i915_vma_put(vma);
351
	}
352 353 354

	i915_gem_context_put(eb->ctx);

355
	if (eb->lut_mask >= 0)
356
		kfree(eb->buckets);
357 358
}

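/*
 * Choose between writing relocations with the CPU (kmap) or through the
 * GTT: objects without struct pages always use the GTT, otherwise CPU
 * relocations are used on LLC platforms or when the object is cacheable
 * or already has dirty cachelines.
 */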
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
361 362 363
	if (!i915_gem_object_has_struct_page(obj))
		return false;

364 365 366
	if (DBG_USE_CPU_RELOC)
		return DBG_USE_CPU_RELOC > 0;

367
	return (HAS_LLC(to_i915(obj->base.dev)) ||
368
		obj->cache_dirty ||
369 370 371
		obj->cache_level != I915_CACHE_NONE);
}

372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390
/* Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
#define GEN8_HIGH_ADDRESS_BIT 47
static inline uint64_t gen8_canonical_addr(uint64_t address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline uint64_t gen8_noncanonical_addr(uint64_t address)
{
	return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
}

static inline uint64_t
391
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
392 393 394 395 396
		  uint64_t target_offset)
{
	return gen8_canonical_addr((int)reloc->delta + target_offset);
}

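/*
 * The relocation cache remembers the most recently mapped page of the
 * object being relocated (either a kmap of a shmem page or an atomic
 * iomap through the GGTT aperture) so that consecutive relocations into
 * the same page do not need to remap it.
 */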
static void reloc_cache_init(struct reloc_cache *cache,
			     struct drm_i915_private *i915)
{
	cache->page = -1;
	cache->vaddr = 0;
	/* Must be a variable in the struct to allow GCC to unroll. */
	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
	cache->node.allocated = false;
}

407 408 409 410 411 412 413 414
static inline void *unmask_page(unsigned long p)
{
	return (void *)(uintptr_t)(p & PAGE_MASK);
}

static inline unsigned int unmask_flags(unsigned long p)
{
	return p & ~PAGE_MASK;
415 416
}

417 418
#define KMAP 0x4 /* after CLFLUSH_FLAGS */

419 420 421 422 423 424 425 426
static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
{
	struct drm_i915_private *i915 =
		container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
	return &i915->ggtt;
}

static void reloc_cache_reset(struct reloc_cache *cache)
427
{
428
	void *vaddr;
429

430 431
	if (!cache->vaddr)
		return;
432

433 434 435 436
	vaddr = unmask_page(cache->vaddr);
	if (cache->vaddr & KMAP) {
		if (cache->vaddr & CLFLUSH_AFTER)
			mb();
437

438 439 440
		kunmap_atomic(vaddr);
		i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
	} else {
441
		wmb();
442
		io_mapping_unmap_atomic((void __iomem *)vaddr);
443
		if (cache->node.allocated) {
444
			struct i915_ggtt *ggtt = cache_to_ggtt(cache);
445 446 447

			ggtt->base.clear_range(&ggtt->base,
					       cache->node.start,
448
					       cache->node.size);
449 450 451
			drm_mm_remove_node(&cache->node);
		} else {
			i915_vma_unpin((struct i915_vma *)cache->node.mm);
452
		}
453
	}
454 455 456

	cache->vaddr = 0;
	cache->page = -1;
457 458 459 460 461 462
}

static void *reloc_kmap(struct drm_i915_gem_object *obj,
			struct reloc_cache *cache,
			int page)
{
463 464 465 466 467 468 469
	void *vaddr;

	if (cache->vaddr) {
		kunmap_atomic(unmask_page(cache->vaddr));
	} else {
		unsigned int flushes;
		int ret;
470

471 472 473 474 475 476
		ret = i915_gem_obj_prepare_shmem_write(obj, &flushes);
		if (ret)
			return ERR_PTR(ret);

		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
477

478 479 480 481
		cache->vaddr = flushes | KMAP;
		cache->node.mm = (void *)obj;
		if (flushes)
			mb();
482 483
	}

484 485
	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
486
	cache->page = page;
487

488
	return vaddr;
489 490
}

491 492 493
static void *reloc_iomap(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 int page)
494
{
495
	struct i915_ggtt *ggtt = cache_to_ggtt(cache);
496
	unsigned long offset;
497
	void *vaddr;
498

499
	if (cache->vaddr) {
500
		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
501 502 503
	} else {
		struct i915_vma *vma;
		int ret;
504

505 506
		if (use_cpu_reloc(obj))
			return NULL;
507

508 509 510
		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			return ERR_PTR(ret);
511

512 513
		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE | PIN_NONBLOCK);
514 515
		if (IS_ERR(vma)) {
			memset(&cache->node, 0, sizeof(cache->node));
516
			ret = drm_mm_insert_node_in_range
517
				(&ggtt->base.mm, &cache->node,
518
				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
519
				 0, ggtt->mappable_end,
520
				 DRM_MM_INSERT_LOW);
521 522
			if (ret) /* no inactive aperture space, use cpu reloc */
				return NULL;
523
		} else {
524
			ret = i915_vma_put_fence(vma);
525 526 527 528
			if (ret) {
				i915_vma_unpin(vma);
				return ERR_PTR(ret);
			}
529

530 531
			cache->node.start = vma->node.start;
			cache->node.mm = (void *)vma;
532
		}
533
	}
534

535 536
	offset = cache->node.start;
	if (cache->node.allocated) {
537
		wmb();
538 539 540 541 542
		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, page),
				       offset, I915_CACHE_NONE, 0);
	} else {
		offset += page << PAGE_SHIFT;
543 544
	}

545 546
	vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->mappable,
							 offset);
547 548
	cache->page = page;
	cache->vaddr = (unsigned long)vaddr;
549

550
	return vaddr;
551 552
}

553 554 555
static void *reloc_vaddr(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 int page)
556
{
557
	void *vaddr;
558

559 560 561 562 563 564 565 566
	if (cache->page == page) {
		vaddr = unmask_page(cache->vaddr);
	} else {
		vaddr = NULL;
		if ((cache->vaddr & KMAP) == 0)
			vaddr = reloc_iomap(obj, cache, page);
		if (!vaddr)
			vaddr = reloc_kmap(obj, cache, page);
567 568
	}

569
	return vaddr;
570 571
}

572
static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
573
{
574 575 576 577 578
	if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
		if (flushes & CLFLUSH_BEFORE) {
			clflushopt(addr);
			mb();
		}
579

580
		*addr = value;
581

582 583 584 585 586 587 588 589 590 591
		/* Writes to the same cacheline are serialised by the CPU
		 * (including clflush). On the write path, we only require
		 * that it hits memory in an orderly fashion and place
		 * mb barriers at the start and end of the relocation phase
		 * to ensure ordering of clflush wrt to the system.
		 */
		if (flushes & CLFLUSH_AFTER)
			clflushopt(addr);
	} else
		*addr = value;
592 593 594
}

static int
595 596 597 598
relocate_entry(struct drm_i915_gem_object *obj,
	       const struct drm_i915_gem_relocation_entry *reloc,
	       struct reloc_cache *cache,
	       u64 target_offset)
599
{
600 601 602
	u64 offset = reloc->offset;
	bool wide = cache->use_64bit_reloc;
	void *vaddr;
603

604 605 606 607 608 609 610 611 612 613 614 615 616 617 618
	target_offset = relocation_target(reloc, target_offset);
repeat:
	vaddr = reloc_vaddr(obj, cache, offset >> PAGE_SHIFT);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	clflush_write32(vaddr + offset_in_page(offset),
			lower_32_bits(target_offset),
			cache->vaddr);

	if (wide) {
		offset += sizeof(u32);
		target_offset >>= 32;
		wide = false;
		goto repeat;
619 620 621 622 623
	}

	return 0;
}

624
static int
625 626 627
eb_relocate_entry(struct drm_i915_gem_object *obj,
		  struct i915_execbuffer *eb,
		  struct drm_i915_gem_relocation_entry *reloc)
628 629
{
	struct drm_gem_object *target_obj;
630
	struct drm_i915_gem_object *target_i915_obj;
631
	struct i915_vma *target_vma;
	uint64_t target_offset;
633
	int ret;
634

	/* we already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
638
		return -ENOENT;
639 640
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;
641

642
	target_offset = gen8_canonical_addr(target_vma->node.start);
643

644 645 646
	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
647 648
	if (unlikely(IS_GEN6(eb->i915) &&
		     reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
649
		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
650
				    PIN_GLOBAL);
651 652 653
		if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
			return ret;
	}
654

655
	/* Validate that the target is in a valid r/w GPU domain */
656
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
657
		DRM_DEBUG("reloc with multiple write domains: "
658 659 660 661 662 663
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
664
		return -EINVAL;
665
	}
666 667
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
668
		DRM_DEBUG("reloc with read/write non-GPU domains: "
669 670 671 672 673 674
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
675
		return -EINVAL;
676 677 678 679 680 681 682 683 684
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
685
		return 0;
686 687

	/* Check that the relocation address is valid... */
688
	if (unlikely(reloc->offset >
689
		     obj->base.size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
690
		DRM_DEBUG("Relocation beyond object bounds: "
691 692 693 694
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
695
		return -EINVAL;
696
	}
697
	if (unlikely(reloc->offset & 3)) {
698
		DRM_DEBUG("Relocation not 4-byte aligned: "
699 700 701
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
702
		return -EINVAL;
703 704
	}

705
	ret = relocate_entry(obj, reloc, &eb->reloc_cache, target_offset);
706 707 708
	if (ret)
		return ret;

709 710
	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;
711
	return 0;
712 713
}

714
static int eb_relocate_vma(struct i915_vma *vma, struct i915_execbuffer *eb)
715
{
716 717
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
718
	struct drm_i915_gem_relocation_entry __user *user_relocs;
719
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
720
	int remain, ret = 0;
721

722
	user_relocs = u64_to_user_ptr(entry->relocs_ptr);
723

724 725 726
	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
727 728 729 730
		unsigned long unwritten;
		unsigned int count;

		count = min_t(unsigned int, remain, ARRAY_SIZE(stack_reloc));
731 732
		remain -= count;

733 734 735 736 737 738 739 740 741 742 743
		/* This is the fast path and we cannot handle a pagefault
		 * whilst holding the struct mutex lest the user pass in the
		 * relocations contained within a mmapped bo. In such a case
		 * the page fault handler would call i915_gem_fault() and
		 * we would try to acquire the struct mutex again. Obviously
		 * this is bad and so lockdep complains vehemently.
		 */
		pagefault_disable();
		unwritten = __copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]));
		pagefault_enable();
		if (unlikely(unwritten)) {
744 745 746
			ret = -EFAULT;
			goto out;
		}
747

748 749
		do {
			u64 offset = r->presumed_offset;
750

751
			ret = eb_relocate_entry(vma->obj, eb, r);
752
			if (ret)
753
				goto out;
754

755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774
			if (r->presumed_offset != offset) {
				pagefault_disable();
				unwritten = __put_user(r->presumed_offset,
						       &user_relocs->presumed_offset);
				pagefault_enable();
				if (unlikely(unwritten)) {
					/* Note that reporting an error now
					 * leaves everything in an inconsistent
					 * state as we have *already* changed
					 * the relocation value inside the
					 * object. As we have not changed the
					 * reloc.presumed_offset or will not
					 * change the execobject.offset, on the
					 * call we may not rewrite the value
					 * inside the object, leaving it
					 * dangling and causing a GPU hang.
					 */
					ret = -EFAULT;
					goto out;
				}
775 776 777 778 779
			}

			user_relocs++;
			r++;
		} while (--count);
780 781
	}

782
out:
783
	reloc_cache_reset(&eb->reloc_cache);
784
	return ret;
785
#undef N_RELOC
786 787 788
}

static int
789 790 791
eb_relocate_vma_slow(struct i915_vma *vma,
		     struct i915_execbuffer *eb,
		     struct drm_i915_gem_relocation_entry *relocs)
792
{
793
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
794
	int i, ret = 0;
795 796

	for (i = 0; i < entry->relocation_count; i++) {
797
		ret = eb_relocate_entry(vma->obj, eb, &relocs[i]);
798
		if (ret)
799
			break;
800
	}
801
	reloc_cache_reset(&eb->reloc_cache);
802
	return ret;
803 804
}

805
static int eb_relocate(struct i915_execbuffer *eb)
806
{
807
	struct i915_vma *vma;
808 809
	int ret = 0;

810
	list_for_each_entry(vma, &eb->vmas, exec_link) {
811
		ret = eb_relocate_vma(vma, eb);
812
		if (ret)
813
			break;
814 815
	}

816
	return ret;
817 818
}

819 820 821 822 823 824
static bool only_mappable_for_reloc(unsigned int flags)
{
	return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
		__EXEC_OBJECT_NEEDS_MAP;
}

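/*
 * Pin a single vma for this execbuf, applying the pinning flags implied
 * by its exec entry (GTT, mappable, fixed offset, 48b support) and
 * grabbing a fence register if the object needs one. If the vma ends up
 * at a new address, flag that relocations must be (re)applied.
 */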
static int
826 827 828
eb_reserve_vma(struct i915_vma *vma,
	       struct intel_engine_cs *engine,
	       bool *need_reloc)
829
{
830
	struct drm_i915_gem_object *obj = vma->obj;
831
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
832
	uint64_t flags;
833 834
	int ret;

835
	flags = PIN_USER;
836 837 838
	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
		flags |= PIN_GLOBAL;

839
	if (!drm_mm_node_allocated(&vma->node)) {
840 841 842 843 844
		/* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
		 * limit address to the first 4GBs for unflagged objects.
		 */
		if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
			flags |= PIN_ZONE_4G;
845 846 847 848
		if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
			flags |= PIN_GLOBAL | PIN_MAPPABLE;
		if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
			flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
849 850
		if (entry->flags & EXEC_OBJECT_PINNED)
			flags |= entry->offset | PIN_OFFSET_FIXED;
851 852
		if ((flags & PIN_MAPPABLE) == 0)
			flags |= PIN_HIGH;
853
	}
854

855 856 857 858 859
	ret = i915_vma_pin(vma,
			   entry->pad_to_size,
			   entry->alignment,
			   flags);
	if ((ret == -ENOSPC || ret == -E2BIG) &&
860
	    only_mappable_for_reloc(entry->flags))
861 862 863 864
		ret = i915_vma_pin(vma,
				   entry->pad_to_size,
				   entry->alignment,
				   flags & ~PIN_MAPPABLE);
865 866 867
	if (ret)
		return ret;

868 869
	entry->flags |= __EXEC_OBJECT_HAS_PIN;

870
	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
871
		ret = i915_vma_get_fence(vma);
872 873
		if (ret)
			return ret;
874

875
		if (i915_vma_pin_fence(vma))
876
			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
877 878
	}

879 880
	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
881 882 883 884 885 886 887 888
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

889
	return 0;
890
}
891

892
static bool
893
need_reloc_mappable(struct i915_vma *vma)
894 895 896
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

897 898 899
	if (entry->relocation_count == 0)
		return false;

900
	if (!i915_vma_is_ggtt(vma))
901 902 903
		return false;

	/* See also use_cpu_reloc() */
904
	if (HAS_LLC(to_i915(vma->obj->base.dev)))
905 906 907 908 909 910 911 912 913 914 915 916
		return false;

	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	return true;
}

static bool
eb_vma_misplaced(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
917

918 919
	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
		!i915_vma_is_ggtt(vma));
920

921
	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
922 923
		return true;

924 925 926
	if (vma->node.size < entry->pad_to_size)
		return true;

927 928 929 930
	if (entry->flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

931 932 933 934
	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

935
	/* avoid costly ping-pong once a batch bo ended up non-mappable */
936 937
	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
	    !i915_vma_is_map_and_fenceable(vma))
938 939
		return !only_mappable_for_reloc(entry->flags);

940 941 942 943
	if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

944 945 946
	return false;
}

947
static int eb_reserve(struct i915_execbuffer *eb)
948
{
949 950
	const bool has_fenced_gpu_access = INTEL_GEN(eb->i915) < 4;
	const bool needs_unfenced_map = INTEL_INFO(eb->i915)->unfenced_needs_alignment;
951
	struct drm_i915_gem_object *obj;
952 953
	struct i915_vma *vma;
	struct list_head ordered_vmas;
954
	struct list_head pinned_vmas;
955
	int retry;
956

957
	INIT_LIST_HEAD(&ordered_vmas);
958
	INIT_LIST_HEAD(&pinned_vmas);
959
	while (!list_empty(&eb->vmas)) {
960 961 962
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

963
		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_link);
964 965
		obj = vma->obj;
		entry = vma->exec_entry;
966

967
		if (eb->ctx->flags & CONTEXT_NO_ZEROMAP)
968 969
			entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

970 971
		if (!has_fenced_gpu_access)
			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
972
		need_fence =
973 974
			(entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
			 needs_unfenced_map) &&
975
			i915_gem_object_is_tiled(vma->obj);
976
		need_mappable = need_fence || need_reloc_mappable(vma);
977

978
		if (entry->flags & EXEC_OBJECT_PINNED)
979
			list_move_tail(&vma->exec_link, &pinned_vmas);
980
		else if (need_mappable) {
981
			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
982
			list_move(&vma->exec_link, &ordered_vmas);
983
		} else
984
			list_move_tail(&vma->exec_link, &ordered_vmas);
985

986
		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
987
		obj->base.pending_write_domain = 0;
988
	}
989 990
	list_splice(&ordered_vmas, &eb->vmas);
	list_splice(&pinned_vmas, &eb->vmas);
991 992 993 994 995 996 997 998 999 1000

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
1006
		int ret = 0;
1007 1008

		/* Unbind any ill-fitting objects or pin. */
1009
		list_for_each_entry(vma, &eb->vmas, exec_link) {
1010
			if (!drm_mm_node_allocated(&vma->node))
1011 1012
				continue;

1013
			if (eb_vma_misplaced(vma))
1014
				ret = i915_vma_unbind(vma);
1015
			else
1016
				ret = eb_reserve_vma(vma, eb->engine, &eb->need_relocs);
1017
			if (ret)
1018 1019 1020 1021
				goto err;
		}

		/* Bind fresh objects */
1022
		list_for_each_entry(vma, &eb->vmas, exec_link) {
1023
			if (drm_mm_node_allocated(&vma->node))
1024
				continue;
1025

1026
			ret = eb_reserve_vma(vma, eb->engine, &eb->need_relocs);
1027 1028
			if (ret)
				goto err;
1029 1030
		}

1031
err:
		if (ret != -ENOSPC || retry++)
1033 1034
			return ret;

1035
		/* Decrement pin count for bound objects */
1036
		list_for_each_entry(vma, &eb->vmas, exec_link)
1037
			eb_unreserve_vma(vma);
1038

1039
		ret = i915_gem_evict_vm(eb->vm, true);
1040 1041 1042 1043 1044 1045
		if (ret)
			return ret;
	} while (1);
}

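/*
 * Slow path: we could not copy the relocations while holding
 * struct_mutex, so drop the lock, copy all relocation entries from
 * userspace into a kernel allocation, retake the lock, reacquire the
 * objects and process the relocations from that copy.
 */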
static int
eb_relocate_slow(struct i915_execbuffer *eb)
1047
{
1048 1049
	const unsigned int count = eb->args->buffer_count;
	struct drm_device *dev = &eb->i915->drm;
1050
	struct drm_i915_gem_relocation_entry *reloc;
1051
	struct i915_vma *vma;
1052
	int *reloc_offset;
1053
	int i, total, ret;
1054

1055
	/* We may process another execbuffer during the unlock... */
1056
	eb_reset(eb);
1057 1058 1059 1060
	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
1061
		total += eb->exec[i].relocation_count;
1062

	reloc_offset = kvmalloc_array(count, sizeof(*reloc_offset), GFP_KERNEL);
	reloc = kvmalloc_array(total, sizeof(*reloc), GFP_KERNEL);
1065
	if (reloc == NULL || reloc_offset == NULL) {
		kvfree(reloc);
		kvfree(reloc_offset);
1068 1069 1070 1071 1072 1073 1074
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
1075 1076
		u64 invalid_offset = (u64)-1;
		int j;
1077

1078
		user_relocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
1079 1080

		if (copy_from_user(reloc+total, user_relocs,
1081
				   eb->exec[i].relocation_count * sizeof(*reloc))) {
1082 1083 1084 1085 1086
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

1087 1088 1089 1090 1091 1092 1093 1094 1095
		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
1096
		for (j = 0; j < eb->exec[i].relocation_count; j++) {
1097 1098 1099
			if (__copy_to_user(&user_relocs[j].presumed_offset,
					   &invalid_offset,
					   sizeof(invalid_offset))) {
1100 1101 1102 1103 1104 1105
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

1106
		reloc_offset[i] = total;
1107
		total += eb->exec[i].relocation_count;
1108 1109 1110 1111 1112 1113 1114 1115
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

1116
	/* reacquire the objects */
1117
	ret = eb_lookup_vmas(eb);
1118 1119
	if (ret)
		goto err;
1120

1121
	ret = eb_reserve(eb);
1122 1123 1124
	if (ret)
		goto err;

1125
	list_for_each_entry(vma, &eb->vmas, exec_link) {
1126 1127 1128
		int idx = vma->exec_entry - eb->exec;

		ret = eb_relocate_vma_slow(vma, eb, reloc + reloc_offset[idx]);
1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	kvfree(reloc);
	kvfree(reloc_offset);
1142 1143 1144 1145
	return ret;
}

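/*
 * Per-object preparation before emitting the batch: record vmas marked
 * for error capture, clflush objects with stale cachelines, and add the
 * implicit fencing dependencies, then flush the chipset caches and
 * invalidate the GPU caches for this request.
 */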
static int
eb_move_to_gpu(struct i915_execbuffer *eb)
1147
{
1148
	struct i915_vma *vma;
1149
	int ret;
1150

1151
	list_for_each_entry(vma, &eb->vmas, exec_link) {
1152
		struct drm_i915_gem_object *obj = vma->obj;
1153

1154 1155 1156 1157 1158 1159 1160
		if (vma->exec_entry->flags & EXEC_OBJECT_CAPTURE) {
			struct i915_gem_capture_list *capture;

			capture = kmalloc(sizeof(*capture), GFP_KERNEL);
			if (unlikely(!capture))
				return -ENOMEM;

1161
			capture->next = eb->request->capture_list;
1162
			capture->vma = vma;
1163
			eb->request->capture_list = capture;
1164 1165
		}

1166 1167 1168
		if (vma->exec_entry->flags & EXEC_OBJECT_ASYNC)
			continue;

1169
		if (unlikely(obj->cache_dirty && !obj->cache_coherent))
1170 1171
			i915_gem_clflush_object(obj, 0);

1172
		ret = i915_gem_request_await_object
1173
			(eb->request, obj, obj->base.pending_write_domain);
1174 1175
		if (ret)
			return ret;
1176 1177
	}

1178
	/* Unconditionally flush any chipset caches (for streaming writes). */
1179
	i915_gem_chipset_flush(eb->i915);
1180

1181
	/* Unconditionally invalidate GPU caches and TLBs. */
1182
	return eb->engine->emit_flush(eb->request, EMIT_INVALIDATE);
1183 1184
}

1185 1186
static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
1187
{
1188
	if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
1189 1190
		return false;

	/* Kernel clipping was a DRI1 misfeature */
	if (exec->num_cliprects || exec->cliprects_ptr)
		return false;

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return false;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return false;

	return true;
1206 1207 1208
}

static int
1209 1210
validate_exec_list(struct drm_device *dev,
		   struct drm_i915_gem_exec_object2 *exec,
1211 1212
		   int count)
{
1213 1214
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
1215 1216 1217
	unsigned invalid_flags;
	int i;

1218 1219 1220
	/* INTERNAL flags must not overlap with external ones */
	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS);

1221 1222 1223
	invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	if (USES_FULL_PPGTT(dev))
		invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
1224 1225

	for (i = 0; i < count; i++) {
1226
		char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
1227 1228
		int length; /* limited by fault_in_pages_readable() */

1229
		if (exec[i].flags & invalid_flags)
1230 1231
			return -EINVAL;

1232 1233 1234 1235 1236 1237 1238 1239 1240
		/* Offset can be used as input (EXEC_OBJECT_PINNED), reject
		 * any non-page-aligned or non-canonical addresses.
		 */
		if (exec[i].flags & EXEC_OBJECT_PINNED) {
			if (exec[i].offset !=
			    gen8_canonical_addr(exec[i].offset & PAGE_MASK))
				return -EINVAL;
		}

1241 1242 1243 1244 1245 1246
		/* From drm_mm perspective address space is continuous,
		 * so from this point we're always using non-canonical
		 * form internally.
		 */
		exec[i].offset = gen8_noncanonical_addr(exec[i].offset);

1247 1248 1249
		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
			return -EINVAL;

1250 1251 1252 1253 1254 1255 1256 1257
		/* pad_to_size was once a reserved field, so sanitize it */
		if (exec[i].flags & EXEC_OBJECT_PAD_TO_SIZE) {
			if (offset_in_page(exec[i].pad_to_size))
				return -EINVAL;
		} else {
			exec[i].pad_to_size = 0;
		}

1258 1259 1260 1261 1262
		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
1263
			return -EINVAL;
1264
		relocs_total += exec[i].relocation_count;
1265 1266 1267

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
1268 1269 1270 1271 1272
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
1273 1274 1275
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

1276
		if (likely(!i915.prefault_disable)) {
1277
			if (fault_in_pages_readable(ptr, length))
1278 1279
				return -EFAULT;
		}
1280 1281 1282 1283 1284
	}

	return 0;
}

1285
static int eb_select_context(struct i915_execbuffer *eb)
1286
{
1287
	unsigned int ctx_id = i915_execbuffer2_get_context_id(*eb->args);
1288
	struct i915_gem_context *ctx;
1289

1290 1291 1292
	ctx = i915_gem_context_lookup(eb->file->driver_priv, ctx_id);
	if (unlikely(IS_ERR(ctx)))
		return PTR_ERR(ctx);
1293

1294
	if (unlikely(i915_gem_context_is_banned(ctx))) {
1295
		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
1296
		return -EIO;
1297 1298
	}

1299 1300 1301 1302
	eb->ctx = i915_gem_context_get(ctx);
	eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base;

	return 0;
1303 1304
}

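/*
 * Mark the vma and its object as active on the request's engine so that
 * retirement keeps them alive until the GPU is done, and update the
 * read/write domains (plus frontbuffer tracking for writes) accordingly.
 */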
void i915_vma_move_to_active(struct i915_vma *vma,
			     struct drm_i915_gem_request *req,
			     unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	const unsigned int idx = req->engine->id;

1312
	lockdep_assert_held(&req->i915->drm.struct_mutex);
1313 1314
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

1315 1316 1317 1318 1319 1320 1321
	/* Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we
	 * add the active reference first and queue for it to be dropped
	 * *last*.
	 */
1322 1323 1324 1325 1326
	if (!i915_vma_is_active(vma))
		obj->active_count++;
	i915_vma_set_active(vma, idx);
	i915_gem_active_set(&vma->last_read[idx], req);
	list_move_tail(&vma->vm_link, &vma->vm->active_list);
1327

1328
	obj->base.write_domain = 0;
1329
	if (flags & EXEC_OBJECT_WRITE) {
1330 1331
		obj->base.write_domain = I915_GEM_DOMAIN_RENDER;

1332 1333
		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
			i915_gem_active_set(&obj->frontbuffer_write, req);
1334

1335
		obj->base.read_domains = 0;
1336
	}
1337
	obj->base.read_domains |= I915_GEM_GPU_DOMAINS;
1338

1339 1340
	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		i915_gem_active_set(&vma->last_fence, req);
1341 1342
}

1343 1344 1345 1346
static void eb_export_fence(struct drm_i915_gem_object *obj,
			    struct drm_i915_gem_request *req,
			    unsigned int flags)
{
1347
	struct reservation_object *resv = obj->resv;
1348 1349 1350 1351 1352

	/* Ignore errors from failing to allocate the new fence, we can't
	 * handle an error right now. Worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
1353
	reservation_object_lock(resv, NULL);
1354 1355 1356 1357
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &req->fence);
	else if (reservation_object_reserve_shared(resv) == 0)
		reservation_object_add_shared_fence(resv, &req->fence);
1358
	reservation_object_unlock(resv);
1359 1360
}

1361
static void
1362
eb_move_to_active(struct i915_execbuffer *eb)
1363
{
1364
	struct i915_vma *vma;
1365

1366
	list_for_each_entry(vma, &eb->vmas, exec_link) {
1367
		struct drm_i915_gem_object *obj = vma->obj;

1369
		obj->base.write_domain = obj->base.pending_write_domain;
1370 1371 1372
		if (obj->base.write_domain)
			vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
		else
1373 1374
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;
1375

1376 1377
		i915_vma_move_to_active(vma, eb->request, vma->exec_entry->flags);
		eb_export_fence(obj, eb->request, vma->exec_entry->flags);
1378 1379 1380
	}
}

1381
static int
1382
i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
1383
{
1384 1385
	u32 *cs;
	int i;
1386

1387
	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
1388 1389 1390
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}
1391

1392 1393 1394
	cs = intel_ring_begin(req, 4 * 3);
	if (IS_ERR(cs))
		return PTR_ERR(cs);
1395 1396

	for (i = 0; i < 4; i++) {
1397 1398 1399
		*cs++ = MI_LOAD_REGISTER_IMM(1);
		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
		*cs++ = 0;
1400 1401
	}

1402
	intel_ring_advance(req, cs);
1403 1404 1405 1406

	return 0;
}

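/*
 * Run the batch through the command parser into a shadow buffer borrowed
 * from the engine's batch pool. Returns the shadow vma on success, NULL
 * if the parser hit an unhandled chained batch (so the original batch is
 * used instead), or an ERR_PTR on failure.
 */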
static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
1408 1409
{
	struct drm_i915_gem_object *shadow_batch_obj;
1410
	struct i915_vma *vma;
1411 1412
	int ret;

1413 1414
	shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
						   PAGE_ALIGN(eb->batch_len));
1415
	if (IS_ERR(shadow_batch_obj))
1416
		return ERR_CAST(shadow_batch_obj);
1417

1418 1419
	ret = intel_engine_cmd_parser(eb->engine,
				      eb->batch->obj,
1420
				      shadow_batch_obj,
1421 1422
				      eb->batch_start_offset,
				      eb->batch_len,
1423
				      is_master);
	if (ret) {
		if (ret == -EACCES) /* unhandled chained batch */
			vma = NULL;
		else
			vma = ERR_PTR(ret);
		goto out;
	}
1431

	vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		goto out;

1436 1437
	vma->exec_entry =
		memset(&eb->shadow_exec_entry, 0, sizeof(*vma->exec_entry));
	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
1439
	i915_gem_object_get(shadow_batch_obj);
1440
	list_add_tail(&vma->exec_link, &eb->vmas);
1441

out:
	i915_gem_object_unpin_pages(shadow_batch_obj);
	return vma;
1445
}
1446

1447 1448 1449 1450 1451 1452 1453 1454
static void
add_to_client(struct drm_i915_gem_request *req,
	      struct drm_file *file)
{
	req->file_priv = file->driver_priv;
	list_add_tail(&req->client_link, &req->file_priv->mm.request_list);
}

1455
static int
1456
execbuf_submit(struct i915_execbuffer *eb)
1457
{
	int ret;
1459

1460
	ret = eb_move_to_gpu(eb);
1461
	if (ret)
		return ret;
1463

1464
	ret = i915_switch_context(eb->request);
1465
	if (ret)
		return ret;
1467

1468 1469
	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(eb->request);
1470
		if (ret)
			return ret;
1472 1473
	}

1474 1475 1476 1477 1478
	ret = eb->engine->emit_bb_start(eb->request,
					eb->batch->node.start +
					eb->batch_start_offset,
					eb->batch_len,
					eb->dispatch_flags);
	if (ret)
		return ret;
1481

1482
	eb_move_to_active(eb);
1483

	return 0;
1485 1486
}

1487 1488
/**
 * Find one BSD ring to dispatch the corresponding BSD command.
1489
 * The engine index is returned.
1490
 */
1491
static unsigned int
1492 1493
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
			 struct drm_file *file)
1494 1495 1496
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

1497
	/* Check whether the file_priv has already selected one ring. */
1498 1499 1500
	if ((int)file_priv->bsd_engine < 0)
		file_priv->bsd_engine = atomic_fetch_xor(1,
			 &dev_priv->mm.bsd_engine_dispatch_index);
1501

1502
	return file_priv->bsd_engine;
1503 1504
}

1505 1506
#define I915_USER_RINGS (4)

1507
static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
1508 1509 1510 1511 1512 1513 1514
	[I915_EXEC_DEFAULT]	= RCS,
	[I915_EXEC_RENDER]	= RCS,
	[I915_EXEC_BLT]		= BCS,
	[I915_EXEC_BSD]		= VCS,
	[I915_EXEC_VEBOX]	= VECS
};

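/*
 * Map the execbuf ring selector onto a physical engine, validating the
 * BSD dispatch flags and load-balancing plain I915_EXEC_BSD submissions
 * across both video engines when a second VCS is present.
 */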
static struct intel_engine_cs *
eb_select_engine(struct drm_i915_private *dev_priv,
		 struct drm_file *file,
		 struct drm_i915_gem_execbuffer2 *args)
1519 1520
{
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
1521
	struct intel_engine_cs *engine;
1522 1523 1524

	if (user_ring_id > I915_USER_RINGS) {
		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
1525
		return NULL;
1526 1527 1528 1529 1530 1531
	}

	if ((user_ring_id != I915_EXEC_BSD) &&
	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
		DRM_DEBUG("execbuf with non bsd ring but with invalid "
			  "bsd dispatch flags: %d\n", (int)(args->flags));
1532
		return NULL;
1533 1534 1535 1536 1537 1538
	}

	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
1539
			bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
1540 1541
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
1542
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
1543 1544 1545 1546
			bsd_idx--;
		} else {
			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
				  bsd_idx);
1547
			return NULL;
1548 1549
		}

1550
		engine = dev_priv->engine[_VCS(bsd_idx)];
1551
	} else {
1552
		engine = dev_priv->engine[user_ring_map[user_ring_id]];
1553 1554
	}

1555
	if (!engine) {
1556
		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
1557
		return NULL;
1558 1559
	}

1560
	return engine;
1561 1562
}

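/*
 * Common execbuf path for both ioctls: validate the arguments, look up
 * and reserve every object, apply relocations (falling back to the slow
 * path on a fault), optionally run the command parser, then allocate a
 * request, wire up the in/out fences and submit the batch.
 */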
static int
i915_gem_do_execbuffer(struct drm_device *dev,
1565 1566
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
1567
		       struct drm_i915_gem_exec_object2 *exec)
1568
{
1569
	struct i915_execbuffer eb;
1570 1571 1572
	struct dma_fence *in_fence = NULL;
	struct sync_file *out_fence = NULL;
	int out_fence_fd = -1;
1573
	int ret;
1574

1575
	if (!i915_gem_check_execbuffer(args))
1576 1577
		return -EINVAL;

1578
	ret = validate_exec_list(dev, exec, args->buffer_count);
1579 1580 1581
	if (ret)
		return ret;

1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592
	eb.i915 = to_i915(dev);
	eb.file = file;
	eb.args = args;
	eb.exec = exec;
	eb.need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	reloc_cache_init(&eb.reloc_cache, eb.i915);

	eb.batch_start_offset = args->batch_start_offset;
	eb.batch_len = args->batch_len;

	eb.dispatch_flags = 0;
1593
	if (args->flags & I915_EXEC_SECURE) {
1594
		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
1595 1596
		    return -EPERM;

1597
		eb.dispatch_flags |= I915_DISPATCH_SECURE;
1598
	}
1599
	if (args->flags & I915_EXEC_IS_PINNED)
1600
		eb.dispatch_flags |= I915_DISPATCH_PINNED;
1601

1602 1603
	eb.engine = eb_select_engine(eb.i915, file, args);
	if (!eb.engine)
1604 1605
		return -EINVAL;

1606
	if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
1607
		if (!HAS_RESOURCE_STREAMER(eb.i915)) {
1608 1609 1610
			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
			return -EINVAL;
		}
1611
		if (eb.engine->id != RCS) {
1612
			DRM_DEBUG("RS is not available on %s\n",
1613
				 eb.engine->name);
1614 1615 1616
			return -EINVAL;
		}

1617
		eb.dispatch_flags |= I915_DISPATCH_RS;
1618 1619
	}

1620 1621
	if (args->flags & I915_EXEC_FENCE_IN) {
		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
1622 1623
		if (!in_fence)
			return -EINVAL;
1624 1625 1626 1627 1628 1629
	}

	if (args->flags & I915_EXEC_FENCE_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
1630
			goto err_in_fence;
1631 1632 1633
		}
	}

1634 1635 1636 1637 1638 1639
	/* Take a local wakeref for preparing to dispatch the execbuf as
	 * we expect to access the hardware fairly frequently in the
	 * process. Upon first dispatch, we acquire another prolonged
	 * wakeref that we hold until the GPU has been idle for at least
	 * 100ms.
	 */
1640
	intel_runtime_pm_get(eb.i915);
1641

1642 1643 1644 1645
	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

1646 1647
	ret = eb_select_context(&eb);
	if (ret) {
1648 1649
		mutex_unlock(&dev->struct_mutex);
		goto pre_mutex_err;
1650
	}
1651

1652 1653
	if (eb_create(&eb)) {
		i915_gem_context_put(eb.ctx);
1654 1655 1656 1657 1658
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

1659
	/* Look up object handles */
1660
	ret = eb_lookup_vmas(&eb);
1661 1662
	if (ret)
		goto err;
1663

1664
	/* take note of the batch buffer before we might reorder the lists */
1665
	eb.batch = eb_get_batch(&eb);
1666

1667
	/* Move the objects en-masse into the GTT, evicting if necessary. */
1668
	ret = eb_reserve(&eb);
1669 1670 1671 1672
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
1673 1674
	if (eb.need_relocs)
		ret = eb_relocate(&eb);
1675 1676
	if (ret) {
		if (ret == -EFAULT) {
1677
			ret = eb_relocate_slow(&eb);
1678 1679 1680 1681 1682 1683 1684
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
1685
	if (eb.batch->obj->base.pending_write_domain) {
1686
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
1687 1688 1689
		ret = -EINVAL;
		goto err;
	}
1690 1691
	if (eb.batch_start_offset > eb.batch->size ||
	    eb.batch_len > eb.batch->size - eb.batch_start_offset) {
1692 1693 1694 1695
		DRM_DEBUG("Attempting to use out-of-bounds batch\n");
		ret = -EINVAL;
		goto err;
	}
1696

1697
	if (eb.engine->needs_cmd_parser && eb.batch_len) {
1698 1699
		struct i915_vma *vma;

1700
		vma = eb_parse(&eb, drm_is_current_master(file));
1701 1702
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
1703 1704
			goto err;
		}
1705

1706
		if (vma) {
1707 1708 1709 1710 1711 1712 1713 1714 1715
			/*
			 * Batch parsed and accepted:
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
			 * bit from MI_BATCH_BUFFER_START commands issued in
			 * the dispatch_execbuffer implementations. We
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
1716 1717 1718
			eb.dispatch_flags |= I915_DISPATCH_SECURE;
			eb.batch_start_offset = 0;
			eb.batch = vma;
1719
		}
1720 1721
	}

1722 1723 1724
	eb.batch->obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
	if (eb.batch_len == 0)
		eb.batch_len = eb.batch->size - eb.batch_start_offset;
1725

1726 1727
	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
1729 1730
	if (eb.dispatch_flags & I915_DISPATCH_SECURE) {
		struct drm_i915_gem_object *obj = eb.batch->obj;
		struct i915_vma *vma;
1732

1733 1734 1735 1736 1737 1738
		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
1739
		 *   so we don't really have issues with multiple objects not
1740 1741 1742
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
1746
			goto err;
		}
1748

1749
		eb.batch = vma;
1750
	}
1751

1752
	/* Allocate a request for this batch buffer nice and early. */
1753 1754 1755
	eb.request = i915_gem_request_alloc(eb.engine, eb.ctx);
	if (IS_ERR(eb.request)) {
		ret = PTR_ERR(eb.request);
1756
		goto err_batch_unpin;
1757
	}
1758

1759
	if (in_fence) {
1760
		ret = i915_gem_request_await_dma_fence(eb.request, in_fence);
1761 1762 1763 1764 1765
		if (ret < 0)
			goto err_request;
	}

	if (out_fence_fd != -1) {
1766
		out_fence = sync_file_create(&eb.request->fence);
1767 1768 1769 1770 1771 1772
		if (!out_fence) {
			ret = -ENOMEM;
			goto err_request;
		}
	}

1773 1774 1775 1776 1777 1778
	/* Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
1779
	eb.request->batch = eb.batch;
1780

1781 1782
	trace_i915_gem_request_queue(eb.request, eb.dispatch_flags);
	ret = execbuf_submit(&eb);
1783
err_request:
1784 1785
	__i915_add_request(eb.request, ret == 0);
	add_to_client(eb.request, file);
1786

1787 1788 1789 1790 1791 1792 1793 1794 1795 1796
	if (out_fence) {
		if (ret == 0) {
			fd_install(out_fence_fd, out_fence->file);
			args->rsvd2 &= GENMASK_ULL(0, 31); /* keep in-fence */
			args->rsvd2 |= (u64)out_fence_fd << 32;
			out_fence_fd = -1;
		} else {
			fput(out_fence->file);
		}
	}
1797

1798
err_batch_unpin:
1799 1800 1801 1802 1803 1804
	/*
	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
	 * batch vma for correctness. For less ugly and less fragility this
	 * needs to be adjusted to also track the ggtt batch vma properly as
	 * active.
	 */
1805 1806
	if (eb.dispatch_flags & I915_DISPATCH_SECURE)
		i915_vma_unpin(eb.batch);
1807
err:
1808
	/* the request owns the ref now */
1809
	eb_destroy(&eb);
1810 1811 1812
	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
1813 1814
	/* intel_gpu_busy should also get a ref, so it will free when the device
	 * is really idle. */
1815
	intel_runtime_pm_put(eb.i915);
1816 1817
	if (out_fence_fd != -1)
		put_unused_fd(out_fence_fd);
1818
err_in_fence:
1819
	dma_fence_put(in_fence);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
1838
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1839 1840 1841 1842
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = kvmalloc_array(sizeof(*exec_list), args->buffer_count, GFP_KERNEL);
	exec2_list = kvmalloc_array(sizeof(*exec2_list), args->buffer_count, GFP_KERNEL);
1845
	if (exec_list == NULL || exec2_list == NULL) {
1846
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1847
			  args->buffer_count);
		kvfree(exec_list);
		kvfree(exec2_list);
1850 1851 1852
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
1853
			     u64_to_user_ptr(args->buffers_ptr),
1854 1855
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
1856
		DRM_DEBUG("copy %d exec entries failed %d\n",
1857
			  args->buffer_count, ret);
		kvfree(exec_list);
		kvfree(exec2_list);
1860 1861 1862 1863 1864 1865 1866 1867 1868
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
1869
		if (INTEL_GEN(to_i915(dev)) < 4)
1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
1884
	i915_execbuffer2_set_context_id(exec2, 0);
1885

1886
	ret = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list);
1887
	if (!ret) {
1888
		struct drm_i915_gem_exec_object __user *user_exec_list =
1889
			u64_to_user_ptr(args->buffers_ptr);
1890

1891
		/* Copy the new buffer offsets back to the user's exec list. */
1892
		for (i = 0; i < args->buffer_count; i++) {
1893 1894
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
1895 1896 1897 1898 1899 1900 1901 1902 1903 1904
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user (%d)\n",
					  args->buffer_count, ret);
				break;
			}
1905 1906 1907
		}
	}

	kvfree(exec_list);
	kvfree(exec2_list);
1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

1921 1922
	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
1923
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
1924 1925 1926
		return -EINVAL;
	}

	exec2_list = kvmalloc_array(args->buffer_count,
1928 1929
				    sizeof(*exec2_list),
				    GFP_TEMPORARY);
1930
	if (exec2_list == NULL) {
1931
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1932 1933 1934 1935
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
1936
			     u64_to_user_ptr(args->buffers_ptr),
1937 1938
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
1939
		DRM_DEBUG("copy %d exec entries failed %d\n",
1940
			  args->buffer_count, ret);
		kvfree(exec2_list);
1942 1943 1944
		return -EFAULT;
	}

1945
	ret = i915_gem_do_execbuffer(dev, file, args, exec2_list);
1946 1947
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
1948
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
1949
				   u64_to_user_ptr(args->buffers_ptr);
1950 1951 1952
		int i;

		for (i = 0; i < args->buffer_count; i++) {
1953 1954
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
1955 1956 1957 1958 1959 1960 1961 1962 1963 1964
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user\n",
					  args->buffer_count);
				break;
			}
1965 1966 1967
		}
	}

	kvfree(exec2_list);
	return ret;
}