/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <linux/dma_remapping.h>
#include <linux/reservation.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define DBG_USE_CPU_RELOC 0 /* -1 force GTT relocs; 1 force CPU relocs */

#define  __EXEC_OBJECT_HAS_PIN		(1<<31)
#define  __EXEC_OBJECT_HAS_FENCE	(1<<30)
#define  __EXEC_OBJECT_NEEDS_MAP	(1<<29)
#define  __EXEC_OBJECT_NEEDS_BIAS	(1<<28)
#define  __EXEC_OBJECT_INTERNAL_FLAGS (0xf<<28) /* all of the above */

#define BATCH_OFFSET_BIAS (256*1024)

#define __I915_EXEC_ILLEGAL_FLAGS \
	(__I915_EXEC_UNKNOWN_FLAGS | I915_EXEC_CONSTANTS_MASK)

struct i915_execbuffer {
	struct drm_i915_private *i915;
	struct drm_file *file;
	struct drm_i915_gem_execbuffer2 *args;
	struct drm_i915_gem_exec_object2 *exec;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	struct i915_address_space *vm;
	struct i915_vma *batch;
	struct drm_i915_gem_request *request;
	u32 batch_start_offset;
	u32 batch_len;
	unsigned int dispatch_flags;
	struct drm_i915_gem_exec_object2 shadow_exec_entry;
	bool need_relocs;
	struct list_head vmas;
	struct reloc_cache {
		struct drm_mm_node node;
		unsigned long vaddr;
		unsigned int page;
		bool use_64bit_reloc : 1;
	} reloc_cache;
	int and;
	union {
		struct i915_vma **lut;
		struct hlist_head *buckets;
	};
};

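/*
 * Set up the handle->vma lookup used while processing the execbuf: a flat
 * array when userspace has asked for index-based handles
 * (I915_EXEC_HANDLE_LUT), otherwise a small power-of-two hash table.
 */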
static int eb_create(struct i915_execbuffer *eb)
{
	eb->lut = NULL;
	if (eb->args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned int size = eb->args->buffer_count;
		size *= sizeof(struct i915_vma *);
		eb->lut = kmalloc(size,
				  GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (!eb->lut) {
		unsigned int size = eb->args->buffer_count;
		unsigned int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb->lut = kzalloc(count * sizeof(struct hlist_head),
				  GFP_TEMPORARY);
		if (!eb->lut)
			return -ENOMEM;

		eb->and = count - 1;
	} else {
		eb->and = -eb->args->buffer_count;
	}

	return 0;
}

static inline void
__eb_unreserve_vma(struct i915_vma *vma,
		   const struct drm_i915_gem_exec_object2 *entry)
{
	if (unlikely(entry->flags & __EXEC_OBJECT_HAS_FENCE))
		i915_vma_unpin_fence(vma);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		__i915_vma_unpin(vma);
}

static void
eb_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	__eb_unreserve_vma(vma, entry);
	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void
eb_reset(struct i915_execbuffer *eb)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &eb->vmas, exec_link) {
		eb_unreserve_vma(vma);
		i915_vma_put(vma);
		vma->exec_entry = NULL;
	}

	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static struct i915_vma *
eb_get_batch(struct i915_execbuffer *eb)
{
	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_link);

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
		vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return vma;
}

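/*
 * Resolve each user-supplied handle into a vma bound to the context's
 * address space, taking a reference on every object and filling in the
 * lookup table built by eb_create().
 */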
static int
eb_lookup_vmas(struct i915_execbuffer *eb)
{
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret;

	INIT_LIST_HEAD(&eb->vmas);

	INIT_LIST_HEAD(&objects);
	spin_lock(&eb->file->table_lock);
	/* Grab a reference to the object and release the lock so we can lookup
	 * or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < eb->args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&eb->file->object_idr, eb->exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&eb->file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				   eb->exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&eb->file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				   obj, eb->exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		i915_gem_object_get(obj);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&eb->file->table_lock);

	i = 0;
	while (!list_empty(&objects)) {
		struct i915_vma *vma;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_vma_instance(obj, eb->vm, NULL);
		if (unlikely(IS_ERR(vma))) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto err;
		}

		/* Transfer ownership from the objects list to the vmas list. */
		list_add_tail(&vma->exec_link, &eb->vmas);
		list_del_init(&obj->obj_exec_link);

		vma->exec_entry = &eb->exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			u32 handle =
				eb->args->flags & I915_EXEC_HANDLE_LUT ?
				i : eb->exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}

	return 0;


err:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		i915_gem_object_put(obj);
	}
	/*
	 * Objects already transferred to the vmas list will be unreferenced by
	 * eb_destroy.
	 */

	return ret;
}

static struct i915_vma *eb_get_vma(struct i915_execbuffer *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct i915_vma *vma;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each_entry(vma, head, exec_node) {
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void eb_destroy(struct i915_execbuffer *eb)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &eb->vmas, exec_link) {
		if (!vma->exec_entry)
			continue;

		__eb_unreserve_vma(vma, vma->exec_entry);
		vma->exec_entry = NULL;
		i915_vma_put(vma);
	}

	i915_gem_context_put(eb->ctx);

	if (eb->buckets)
		kfree(eb->buckets);
}

static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	if (!i915_gem_object_has_struct_page(obj))
		return false;

	if (DBG_USE_CPU_RELOC)
		return DBG_USE_CPU_RELOC > 0;

	return (HAS_LLC(to_i915(obj->base.dev)) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		obj->cache_level != I915_CACHE_NONE);
}

/* Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
#define GEN8_HIGH_ADDRESS_BIT 47
static inline uint64_t gen8_canonical_addr(uint64_t address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline uint64_t gen8_noncanonical_addr(uint64_t address)
{
	return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
}

static inline uint64_t
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
		  uint64_t target_offset)
{
	return gen8_canonical_addr((int)reloc->delta + target_offset);
}

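/*
 * The reloc_cache tracks the page most recently mapped for applying
 * relocations (either via kmap or through the GTT aperture) so that
 * consecutive relocations touching the same page avoid remapping it.
 */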
static void reloc_cache_init(struct reloc_cache *cache,
			     struct drm_i915_private *i915)
{
	cache->page = -1;
	cache->vaddr = 0;
	/* Must be a variable in the struct to allow GCC to unroll. */
	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
	cache->node.allocated = false;
}

static inline void *unmask_page(unsigned long p)
{
	return (void *)(uintptr_t)(p & PAGE_MASK);
}

static inline unsigned int unmask_flags(unsigned long p)
{
	return p & ~PAGE_MASK;
}

#define KMAP 0x4 /* after CLFLUSH_FLAGS */

static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
{
	struct drm_i915_private *i915 =
		container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
	return &i915->ggtt;
}

static void reloc_cache_reset(struct reloc_cache *cache)
{
	void *vaddr;

	if (!cache->vaddr)
		return;

	vaddr = unmask_page(cache->vaddr);
	if (cache->vaddr & KMAP) {
		if (cache->vaddr & CLFLUSH_AFTER)
			mb();

		kunmap_atomic(vaddr);
		i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
	} else {
		wmb();
		io_mapping_unmap_atomic((void __iomem *)vaddr);
		if (cache->node.allocated) {
			struct i915_ggtt *ggtt = cache_to_ggtt(cache);

			ggtt->base.clear_range(&ggtt->base,
					       cache->node.start,
					       cache->node.size);
			drm_mm_remove_node(&cache->node);
		} else {
			i915_vma_unpin((struct i915_vma *)cache->node.mm);
		}
	}

	cache->vaddr = 0;
	cache->page = -1;
}

static void *reloc_kmap(struct drm_i915_gem_object *obj,
			struct reloc_cache *cache,
			int page)
{
	void *vaddr;

	if (cache->vaddr) {
		kunmap_atomic(unmask_page(cache->vaddr));
	} else {
		unsigned int flushes;
		int ret;

		ret = i915_gem_obj_prepare_shmem_write(obj, &flushes);
		if (ret)
			return ERR_PTR(ret);

		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);

		cache->vaddr = flushes | KMAP;
		cache->node.mm = (void *)obj;
		if (flushes)
			mb();
	}

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
	cache->page = page;

	return vaddr;
}

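/*
 * GTT relocation path: map the target page through the mappable aperture,
 * falling back to inserting individual pages into a temporary GGTT node
 * when the object cannot be pinned into the mappable region.
 */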
static void *reloc_iomap(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 int page)
{
	struct i915_ggtt *ggtt = cache_to_ggtt(cache);
	unsigned long offset;
	void *vaddr;

	if (cache->vaddr) {
		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
	} else {
		struct i915_vma *vma;
		int ret;

		if (use_cpu_reloc(obj))
			return NULL;

		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			return ERR_PTR(ret);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE | PIN_NONBLOCK);
		if (IS_ERR(vma)) {
			memset(&cache->node, 0, sizeof(cache->node));
			ret = drm_mm_insert_node_in_range
				(&ggtt->base.mm, &cache->node,
				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
				 0, ggtt->mappable_end,
				 DRM_MM_INSERT_LOW);
			if (ret) /* no inactive aperture space, use cpu reloc */
				return NULL;
		} else {
			ret = i915_vma_put_fence(vma);
			if (ret) {
				i915_vma_unpin(vma);
				return ERR_PTR(ret);
			}

			cache->node.start = vma->node.start;
			cache->node.mm = (void *)vma;
		}
	}

	offset = cache->node.start;
	if (cache->node.allocated) {
		wmb();
		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, page),
				       offset, I915_CACHE_NONE, 0);
	} else {
		offset += page << PAGE_SHIFT;
	}

	vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->mappable,
							 offset);
	cache->page = page;
	cache->vaddr = (unsigned long)vaddr;

	return vaddr;
}

static void *reloc_vaddr(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 int page)
{
	void *vaddr;

	if (cache->page == page) {
		vaddr = unmask_page(cache->vaddr);
	} else {
		vaddr = NULL;
		if ((cache->vaddr & KMAP) == 0)
			vaddr = reloc_iomap(obj, cache, page);
		if (!vaddr)
			vaddr = reloc_kmap(obj, cache, page);
	}

	return vaddr;
}

static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
{
	if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
		if (flushes & CLFLUSH_BEFORE) {
			clflushopt(addr);
			mb();
		}

		*addr = value;

		/* Writes to the same cacheline are serialised by the CPU
		 * (including clflush). On the write path, we only require
		 * that it hits memory in an orderly fashion and place
		 * mb barriers at the start and end of the relocation phase
		 * to ensure ordering of clflush wrt to the system.
		 */
		if (flushes & CLFLUSH_AFTER)
			clflushopt(addr);
	} else
		*addr = value;
}

static int
relocate_entry(struct drm_i915_gem_object *obj,
	       const struct drm_i915_gem_relocation_entry *reloc,
	       struct reloc_cache *cache,
	       u64 target_offset)
{
	u64 offset = reloc->offset;
	bool wide = cache->use_64bit_reloc;
	void *vaddr;

	target_offset = relocation_target(reloc, target_offset);
repeat:
	vaddr = reloc_vaddr(obj, cache, offset >> PAGE_SHIFT);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	clflush_write32(vaddr + offset_in_page(offset),
			lower_32_bits(target_offset),
			cache->vaddr);

	if (wide) {
		offset += sizeof(u32);
		target_offset >>= 32;
		wide = false;
		goto repeat;
	}

	return 0;
}

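/*
 * Apply a single relocation entry: validate the target handle, offset and
 * domains, then patch the batch with the target's address unless the
 * presumed offset is already correct.
 */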
static int
eb_relocate_entry(struct drm_i915_gem_object *obj,
		  struct i915_execbuffer *eb,
		  struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint64_t target_offset;
	int ret;

	/* we already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = gen8_canonical_addr(target_vma->node.start);

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(eb->i915) &&
		     reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
				    PIN_GLOBAL);
		if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
			return ret;
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		     obj->base.size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return -EINVAL;
	}

	ret = relocate_entry(obj, reloc, &eb->reloc_cache, target_offset);
	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;
	return 0;
}

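/*
 * Fast relocation path: copy the user's relocation entries in chunks with
 * pagefaults disabled (struct_mutex is held), apply them, and write the
 * updated presumed offsets back to userspace.
 */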
static int eb_relocate_vma(struct i915_vma *vma, struct i915_execbuffer *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int remain, ret = 0;

	user_relocs = u64_to_user_ptr(entry->relocs_ptr);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		unsigned long unwritten;
		unsigned int count;

		count = min_t(unsigned int, remain, ARRAY_SIZE(stack_reloc));
		remain -= count;

		/* This is the fast path and we cannot handle a pagefault
		 * whilst holding the struct mutex lest the user pass in the
		 * relocations contained within a mmaped bo. In such a case
		 * the page fault handler would call i915_gem_fault() and
		 * we would try to acquire the struct mutex again. Obviously
		 * this is bad and so lockdep complains vehemently.
		 */
		pagefault_disable();
		unwritten = __copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]));
		pagefault_enable();
		if (unlikely(unwritten)) {
			ret = -EFAULT;
			goto out;
		}

		do {
			u64 offset = r->presumed_offset;

			ret = eb_relocate_entry(vma->obj, eb, r);
			if (ret)
				goto out;

			if (r->presumed_offset != offset) {
				pagefault_disable();
				unwritten = __put_user(r->presumed_offset,
						       &user_relocs->presumed_offset);
				pagefault_enable();
				if (unlikely(unwritten)) {
					/* Note that reporting an error now
					 * leaves everything in an inconsistent
					 * state as we have *already* changed
					 * the relocation value inside the
					 * object. As we have not changed the
					 * reloc.presumed_offset or will not
					 * change the execobject.offset, on the
					 * call we may not rewrite the value
					 * inside the object, leaving it
					 * dangling and causing a GPU hang.
					 */
					ret = -EFAULT;
					goto out;
				}
			}

			user_relocs++;
			r++;
		} while (--count);
	}

out:
	reloc_cache_reset(&eb->reloc_cache);
	return ret;
#undef N_RELOC
}

static int
eb_relocate_vma_slow(struct i915_vma *vma,
		     struct i915_execbuffer *eb,
		     struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int i, ret = 0;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = eb_relocate_entry(vma->obj, eb, &relocs[i]);
		if (ret)
			break;
	}
	reloc_cache_reset(&eb->reloc_cache);
	return ret;
}

static int eb_relocate(struct i915_execbuffer *eb)
{
	struct i915_vma *vma;
	int ret = 0;

	list_for_each_entry(vma, &eb->vmas, exec_link) {
		ret = eb_relocate_vma(vma, eb);
		if (ret)
			break;
	}

	return ret;
}

static bool only_mappable_for_reloc(unsigned int flags)
{
	return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
		__EXEC_OBJECT_NEEDS_MAP;
}

static int
eb_reserve_vma(struct i915_vma *vma,
	       struct intel_engine_cs *engine,
	       bool *need_reloc)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	uint64_t flags;
	int ret;

	flags = PIN_USER;
	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
		flags |= PIN_GLOBAL;

	if (!drm_mm_node_allocated(&vma->node)) {
		/* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
		 * limit address to the first 4GBs for unflagged objects.
		 */
		if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
			flags |= PIN_ZONE_4G;
		if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
			flags |= PIN_GLOBAL | PIN_MAPPABLE;
		if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
			flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
		if (entry->flags & EXEC_OBJECT_PINNED)
			flags |= entry->offset | PIN_OFFSET_FIXED;
		if ((flags & PIN_MAPPABLE) == 0)
			flags |= PIN_HIGH;
	}

	ret = i915_vma_pin(vma,
			   entry->pad_to_size,
			   entry->alignment,
			   flags);
	if ((ret == -ENOSPC || ret == -E2BIG) &&
	    only_mappable_for_reloc(entry->flags))
		ret = i915_vma_pin(vma,
				   entry->pad_to_size,
				   entry->alignment,
				   flags & ~PIN_MAPPABLE);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
		ret = i915_vma_get_fence(vma);
		if (ret)
			return ret;

		if (i915_vma_pin_fence(vma))
			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	return 0;
}

static bool
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	if (entry->relocation_count == 0)
		return false;

	if (!i915_vma_is_ggtt(vma))
		return false;

	/* See also use_cpu_reloc() */
	if (HAS_LLC(to_i915(vma->obj->base.dev)))
		return false;

	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	return true;
}

static bool
eb_vma_misplaced(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
		!i915_vma_is_ggtt(vma));

	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
		return true;

	if (vma->node.size < entry->pad_to_size)
		return true;

	if (entry->flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	/* avoid costly ping-pong once a batch bo ended up non-mappable */
	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
	    !i915_vma_is_map_and_fenceable(vma))
		return !only_mappable_for_reloc(entry->flags);

	if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	return false;
}

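/*
 * Pin every vma for execution. The list is first sorted so that objects
 * needing a mappable or fenced GTT binding come first; if pinning runs out
 * of space, everything is unpinned, the VM is evicted and we retry.
 */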
static int eb_reserve(struct i915_execbuffer *eb)
{
	const bool has_fenced_gpu_access = INTEL_GEN(eb->i915) < 4;
	const bool needs_unfenced_map = INTEL_INFO(eb->i915)->unfenced_needs_alignment;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct list_head ordered_vmas;
	struct list_head pinned_vmas;
	int retry;

	INIT_LIST_HEAD(&ordered_vmas);
	INIT_LIST_HEAD(&pinned_vmas);
	while (!list_empty(&eb->vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_link);
		obj = vma->obj;
		entry = vma->exec_entry;

		if (eb->ctx->flags & CONTEXT_NO_ZEROMAP)
			entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

		if (!has_fenced_gpu_access)
			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
		need_fence =
			(entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
			 needs_unfenced_map) &&
			i915_gem_object_is_tiled(obj);
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (entry->flags & EXEC_OBJECT_PINNED)
			list_move_tail(&vma->exec_link, &pinned_vmas);
		else if (need_mappable) {
			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
			list_move(&vma->exec_link, &ordered_vmas);
		} else
			list_move_tail(&vma->exec_link, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
	}
	list_splice(&ordered_vmas, &eb->vmas);
	list_splice(&pinned_vmas, &eb->vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, &eb->vmas, exec_link) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (eb_vma_misplaced(vma))
				ret = i915_vma_unbind(vma);
			else
				ret = eb_reserve_vma(vma, eb->engine, &eb->need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, &eb->vmas, exec_link) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = eb_reserve_vma(vma, eb->engine, &eb->need_relocs);
			if (ret)
				goto err;
		}

err:
		if (ret != -ENOSPC || retry++)
			return ret;

		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, &eb->vmas, exec_link)
			eb_unreserve_vma(vma);

		ret = i915_gem_evict_vm(eb->vm, true);
		if (ret)
			return ret;
	} while (1);
}

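/*
 * Slow relocation path: drop struct_mutex, copy all relocation entries
 * with faulting copies into a kernel buffer, then retake the lock,
 * re-lookup and re-reserve the objects and relocate from that copy.
 */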
static int
eb_relocate_slow(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->args->buffer_count;
	struct drm_device *dev = &eb->i915->drm;
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_vma *vma;
	int *reloc_offset;
	int i, total, ret;

	/* We may process another execbuffer during the unlock... */
	eb_reset(eb);
	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += eb->exec[i].relocation_count;

	reloc_offset = kvmalloc_array(count, sizeof(*reloc_offset), GFP_KERNEL);
	reloc = kvmalloc_array(total, sizeof(*reloc), GFP_KERNEL);
	if (reloc == NULL || reloc_offset == NULL) {
		kvfree(reloc);
		kvfree(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   eb->exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < eb->exec[i].relocation_count; j++) {
			if (__copy_to_user(&user_relocs[j].presumed_offset,
					   &invalid_offset,
					   sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += eb->exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	ret = eb_lookup_vmas(eb);
	if (ret)
		goto err;

	ret = eb_reserve(eb);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_link) {
		int idx = vma->exec_entry - eb->exec;

		ret = eb_relocate_vma_slow(vma, eb, reloc + reloc_offset[idx]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	kvfree(reloc);
	kvfree(reloc_offset);
	return ret;
}

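/*
 * Prepare the objects for GPU execution: queue error-capture entries,
 * flush CPU caches where needed, record the request's dependencies on each
 * object and invalidate the GPU caches.
 */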
static int
eb_move_to_gpu(struct i915_execbuffer *eb)
{
	struct i915_vma *vma;
	int ret;

	list_for_each_entry(vma, &eb->vmas, exec_link) {
		struct drm_i915_gem_object *obj = vma->obj;

		if (vma->exec_entry->flags & EXEC_OBJECT_CAPTURE) {
			struct i915_gem_capture_list *capture;

			capture = kmalloc(sizeof(*capture), GFP_KERNEL);
			if (unlikely(!capture))
				return -ENOMEM;

			capture->next = eb->request->capture_list;
			capture->vma = vma;
			eb->request->capture_list = capture;
		}

		if (vma->exec_entry->flags & EXEC_OBJECT_ASYNC)
			continue;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) {
			i915_gem_clflush_object(obj, 0);
			obj->base.write_domain = 0;
		}

		ret = i915_gem_request_await_object
			(eb->request, obj, obj->base.pending_write_domain);
		if (ret)
			return ret;
	}

	/* Unconditionally flush any chipset caches (for streaming writes). */
	i915_gem_chipset_flush(eb->i915);

	/* Unconditionally invalidate GPU caches and TLBs. */
	return eb->engine->emit_flush(eb->request, EMIT_INVALIDATE);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
		return false;

	/* Kernel clipping was a DRI1 misfeature */
	if (exec->num_cliprects || exec->cliprects_ptr)
		return false;

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return false;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return false;

	return true;
}

static int
validate_exec_list(struct drm_device *dev,
		   struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
	unsigned invalid_flags;
	int i;

	/* INTERNAL flags must not overlap with external ones */
	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS);

	invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	if (USES_FULL_PPGTT(dev))
		invalid_flags |= EXEC_OBJECT_NEEDS_GTT;

	for (i = 0; i < count; i++) {
		char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & invalid_flags)
			return -EINVAL;

		/* Offset can be used as input (EXEC_OBJECT_PINNED), reject
		 * any non-page-aligned or non-canonical addresses.
		 */
		if (exec[i].flags & EXEC_OBJECT_PINNED) {
			if (exec[i].offset !=
			    gen8_canonical_addr(exec[i].offset & PAGE_MASK))
				return -EINVAL;
		}

		/* From drm_mm perspective address space is continuous,
		 * so from this point we're always using non-canonical
		 * form internally.
		 */
		exec[i].offset = gen8_noncanonical_addr(exec[i].offset);

		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
			return -EINVAL;

		/* pad_to_size was once a reserved field, so sanitize it */
		if (exec[i].flags & EXEC_OBJECT_PAD_TO_SIZE) {
			if (offset_in_page(exec[i].pad_to_size))
				return -EINVAL;
		} else {
			exec[i].pad_to_size = 0;
		}

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915.prefault_disable)) {
			if (fault_in_pages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}

static int eb_select_context(struct i915_execbuffer *eb)
{
	unsigned int ctx_id = i915_execbuffer2_get_context_id(*eb->args);
	struct i915_gem_context *ctx;

	ctx = i915_gem_context_lookup(eb->file->driver_priv, ctx_id);
	if (unlikely(IS_ERR(ctx)))
		return PTR_ERR(ctx);

	if (unlikely(i915_gem_context_is_banned(ctx))) {
		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
		return -EIO;
	}

	eb->ctx = i915_gem_context_get(ctx);
	eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base;

	return 0;
}

static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	return !(obj->cache_level == I915_CACHE_NONE ||
		 obj->cache_level == I915_CACHE_WT);
}

void i915_vma_move_to_active(struct i915_vma *vma,
			     struct drm_i915_gem_request *req,
			     unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	const unsigned int idx = req->engine->id;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	/* Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we
	 * add the active reference first and queue for it to be dropped
	 * *last*.
	 */
	if (!i915_vma_is_active(vma))
		obj->active_count++;
	i915_vma_set_active(vma, idx);
	i915_gem_active_set(&vma->last_read[idx], req);
	list_move_tail(&vma->vm_link, &vma->vm->active_list);

	if (flags & EXEC_OBJECT_WRITE) {
		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
			i915_gem_active_set(&obj->frontbuffer_write, req);

		/* update for the implicit flush after a batch */
		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
		if (!obj->cache_dirty && gpu_write_needs_clflush(obj))
			obj->cache_dirty = true;
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		i915_gem_active_set(&vma->last_fence, req);
}

static void eb_export_fence(struct drm_i915_gem_object *obj,
			    struct drm_i915_gem_request *req,
			    unsigned int flags)
{
	struct reservation_object *resv = obj->resv;

	/* Ignore errors from failing to allocate the new fence, we can't
	 * handle an error right now. Worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
	reservation_object_lock(resv, NULL);
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &req->fence);
	else if (reservation_object_reserve_shared(resv) == 0)
		reservation_object_add_shared_fence(resv, &req->fence);
	reservation_object_unlock(resv);
}

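/*
 * Mark every vma as active on this request and export the request fence
 * to each object's reservation object for implicit synchronisation.
 */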
static void
eb_move_to_active(struct i915_execbuffer *eb)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &eb->vmas, exec_link) {
		struct drm_i915_gem_object *obj = vma->obj;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain)
			vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
		else
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;

		i915_vma_move_to_active(vma, eb->request, vma->exec_entry->flags);
		eb_export_fence(obj, eb->request, vma->exec_entry->flags);
	}
}

static int
i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
{
	u32 *cs;
	int i;

	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	cs = intel_ring_begin(req, 4 * 3);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0; i < 4; i++) {
		*cs++ = MI_LOAD_REGISTER_IMM(1);
		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
		*cs++ = 0;
	}

	intel_ring_advance(req, cs);

	return 0;
}

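/*
 * Run the command parser: copy the user batch into a shadow buffer taken
 * from the engine's batch pool and, if it is accepted, execute the shadow
 * in place of the original batch.
 */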
static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
{
	struct drm_i915_gem_object *shadow_batch_obj;
	struct i915_vma *vma;
	int ret;

	shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
						   PAGE_ALIGN(eb->batch_len));
	if (IS_ERR(shadow_batch_obj))
		return ERR_CAST(shadow_batch_obj);

	ret = intel_engine_cmd_parser(eb->engine,
				      eb->batch->obj,
				      shadow_batch_obj,
				      eb->batch_start_offset,
				      eb->batch_len,
				      is_master);
	if (ret) {
		if (ret == -EACCES) /* unhandled chained batch */
			vma = NULL;
		else
			vma = ERR_PTR(ret);
		goto out;
	}

	vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		goto out;

	vma->exec_entry =
		memset(&eb->shadow_exec_entry, 0, sizeof(*vma->exec_entry));
	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
	i915_gem_object_get(shadow_batch_obj);
	list_add_tail(&vma->exec_link, &eb->vmas);

out:
	i915_gem_object_unpin_pages(shadow_batch_obj);
	return vma;
}

static void
add_to_client(struct drm_i915_gem_request *req,
	      struct drm_file *file)
{
	req->file_priv = file->driver_priv;
	list_add_tail(&req->client_link, &req->file_priv->mm.request_list);
}

static int
execbuf_submit(struct i915_execbuffer *eb)
{
	int ret;

	ret = eb_move_to_gpu(eb);
	if (ret)
		return ret;

	ret = i915_switch_context(eb->request);
	if (ret)
		return ret;

	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(eb->request);
		if (ret)
			return ret;
	}

	ret = eb->engine->emit_bb_start(eb->request,
					eb->batch->node.start +
					eb->batch_start_offset,
					eb->batch_len,
					eb->dispatch_flags);
	if (ret)
		return ret;

	eb_move_to_active(eb);

	return 0;
}

/**
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The engine index is returned.
 */
static unsigned int
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
			 struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_engine < 0)
		file_priv->bsd_engine = atomic_fetch_xor(1,
			 &dev_priv->mm.bsd_engine_dispatch_index);

	return file_priv->bsd_engine;
}

#define I915_USER_RINGS (4)

static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
	[I915_EXEC_DEFAULT]	= RCS,
	[I915_EXEC_RENDER]	= RCS,
	[I915_EXEC_BLT]		= BCS,
	[I915_EXEC_BSD]		= VCS,
	[I915_EXEC_VEBOX]	= VECS
};

static struct intel_engine_cs *
eb_select_engine(struct drm_i915_private *dev_priv,
		 struct drm_file *file,
		 struct drm_i915_gem_execbuffer2 *args)
{
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
	struct intel_engine_cs *engine;

	if (user_ring_id > I915_USER_RINGS) {
		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
		return NULL;
	}

	if ((user_ring_id != I915_EXEC_BSD) &&
	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
		DRM_DEBUG("execbuf with non bsd ring but with invalid "
			  "bsd dispatch flags: %d\n", (int)(args->flags));
		return NULL;
	}

	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
			bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
			bsd_idx--;
		} else {
			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
				  bsd_idx);
			return NULL;
		}

		engine = dev_priv->engine[_VCS(bsd_idx)];
	} else {
		engine = dev_priv->engine[user_ring_map[user_ring_id]];
	}

	if (!engine) {
		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
		return NULL;
	}

	return engine;
}

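/*
 * Main execbuffer path: validate the arguments, look up and reserve the
 * objects, apply relocations, optionally run the command parser, then
 * allocate a request and submit the batch, wiring up in/out fences.
 */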
static int
i915_gem_do_execbuffer(struct drm_device *dev,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	struct i915_execbuffer eb;
	struct dma_fence *in_fence = NULL;
	struct sync_file *out_fence = NULL;
	int out_fence_fd = -1;
	int ret;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(dev, exec, args->buffer_count);
	if (ret)
		return ret;

	eb.i915 = to_i915(dev);
	eb.file = file;
	eb.args = args;
	eb.exec = exec;
	eb.need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	reloc_cache_init(&eb.reloc_cache, eb.i915);

	eb.batch_start_offset = args->batch_start_offset;
	eb.batch_len = args->batch_len;

	eb.dispatch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
		    return -EPERM;

		eb.dispatch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		eb.dispatch_flags |= I915_DISPATCH_PINNED;

	eb.engine = eb_select_engine(eb.i915, file, args);
	if (!eb.engine)
		return -EINVAL;

	if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
		if (!HAS_RESOURCE_STREAMER(eb.i915)) {
			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
			return -EINVAL;
		}
		if (eb.engine->id != RCS) {
			DRM_DEBUG("RS is not available on %s\n",
				 eb.engine->name);
			return -EINVAL;
		}

		eb.dispatch_flags |= I915_DISPATCH_RS;
	}

	if (args->flags & I915_EXEC_FENCE_IN) {
		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!in_fence)
			return -EINVAL;
	}

	if (args->flags & I915_EXEC_FENCE_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto err_in_fence;
		}
	}

	/* Take a local wakeref for preparing to dispatch the execbuf as
	 * we expect to access the hardware fairly frequently in the
	 * process. Upon first dispatch, we acquire another prolonged
	 * wakeref that we hold until the GPU has been idle for at least
	 * 100ms.
	 */
	intel_runtime_pm_get(eb.i915);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	ret = eb_select_context(&eb);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		goto pre_mutex_err;
	}

	if (eb_create(&eb)) {
		i915_gem_context_put(eb.ctx);
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_vmas(&eb);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	eb.batch = eb_get_batch(&eb);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	ret = eb_reserve(&eb);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (eb.need_relocs)
		ret = eb_relocate(&eb);
	if (ret) {
		if (ret == -EFAULT) {
			ret = eb_relocate_slow(&eb);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (eb.batch->obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	if (eb.batch_start_offset > eb.batch->size ||
	    eb.batch_len > eb.batch->size - eb.batch_start_offset) {
		DRM_DEBUG("Attempting to use out-of-bounds batch\n");
		ret = -EINVAL;
		goto err;
	}

	if (eb.engine->needs_cmd_parser && eb.batch_len) {
		struct i915_vma *vma;

		vma = eb_parse(&eb, drm_is_current_master(file));
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto err;
		}

		if (vma) {
			/*
			 * Batch parsed and accepted:
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
			 * bit from MI_BATCH_BUFFER_START commands issued in
			 * the dispatch_execbuffer implementations. We
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
			eb.dispatch_flags |= I915_DISPATCH_SECURE;
			eb.batch_start_offset = 0;
			eb.batch = vma;
		}
	}

	eb.batch->obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
	if (eb.batch_len == 0)
		eb.batch_len = eb.batch->size - eb.batch_start_offset;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (eb.dispatch_flags & I915_DISPATCH_SECURE) {
		struct drm_i915_gem_object *obj = eb.batch->obj;
		struct i915_vma *vma;

		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto err;
		}

		eb.batch = vma;
	}

	/* Allocate a request for this batch buffer nice and early. */
	eb.request = i915_gem_request_alloc(eb.engine, eb.ctx);
	if (IS_ERR(eb.request)) {
		ret = PTR_ERR(eb.request);
		goto err_batch_unpin;
	}

	if (in_fence) {
		ret = i915_gem_request_await_dma_fence(eb.request, in_fence);
		if (ret < 0)
			goto err_request;
	}

	if (out_fence_fd != -1) {
		out_fence = sync_file_create(&eb.request->fence);
		if (!out_fence) {
			ret = -ENOMEM;
			goto err_request;
		}
	}

	/* Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	eb.request->batch = eb.batch;

	trace_i915_gem_request_queue(eb.request, eb.dispatch_flags);
	ret = execbuf_submit(&eb);
err_request:
	__i915_add_request(eb.request, ret == 0);
	add_to_client(eb.request, file);

	if (out_fence) {
		if (ret == 0) {
			fd_install(out_fence_fd, out_fence->file);
			args->rsvd2 &= GENMASK_ULL(0, 31); /* keep in-fence */
			args->rsvd2 |= (u64)out_fence_fd << 32;
			out_fence_fd = -1;
		} else {
			fput(out_fence->file);
		}
	}

err_batch_unpin:
	/*
	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
	 * batch vma for correctness. For less ugly and less fragility this
	 * needs to be adjusted to also track the ggtt batch vma properly as
	 * active.
	 */
	if (eb.dispatch_flags & I915_DISPATCH_SECURE)
		i915_vma_unpin(eb.batch);
err:
	/* the request owns the ref now */
	eb_destroy(&eb);
	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	/* intel_gpu_busy should also get a ref, so it will free when the device
	 * is really idle. */
	intel_runtime_pm_put(eb.i915);
	if (out_fence_fd != -1)
		put_unused_fd(out_fence_fd);
err_in_fence:
	dma_fence_put(in_fence);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = kvmalloc_array(sizeof(*exec_list), args->buffer_count, GFP_KERNEL);
	exec2_list = kvmalloc_array(sizeof(*exec2_list), args->buffer_count, GFP_KERNEL);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_GEN(to_i915(dev)) < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list);
	if (!ret) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user (%d)\n",
					  args->buffer_count, ret);
				break;
			}
		}
	}

	kvfree(exec_list);
	kvfree(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = kvmalloc_array(args->buffer_count,
				    sizeof(*exec2_list),
				    GFP_TEMPORARY);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		kvfree(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
				   u64_to_user_ptr(args->buffers_ptr);
		int i;

		for (i = 0; i < args->buffer_count; i++) {
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user\n",
					  args->buffer_count);
				break;
			}
		}
	}

	kvfree(exec2_list);
	return ret;
}