/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <linux/dma_remapping.h>
#include <linux/reservation.h>
#include <linux/uaccess.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_dmabuf.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define  __EXEC_OBJECT_HAS_PIN		(1<<31)
#define  __EXEC_OBJECT_HAS_FENCE	(1<<30)
#define  __EXEC_OBJECT_NEEDS_MAP	(1<<29)
#define  __EXEC_OBJECT_NEEDS_BIAS	(1<<28)
#define  __EXEC_OBJECT_INTERNAL_FLAGS (0xf<<28) /* all of the above */

#define BATCH_OFFSET_BIAS (256*1024)

struct i915_execbuffer_params {
	struct drm_device               *dev;
	struct drm_file                 *file;
	struct i915_vma			*batch;
	u32				dispatch_flags;
	u32				args_batch_start_offset;
	struct intel_engine_cs          *engine;
	struct i915_gem_context         *ctx;
	struct drm_i915_gem_request     *request;
};

struct eb_vmas {
	struct list_head vmas;
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};

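/*
 * Allocate the eb_vmas tracking structure. With I915_EXEC_HANDLE_LUT
 * userspace promises that buffer handles equal their index in the exec
 * list, so a flat lookup table suffices; otherwise we fall back to a
 * hash table. eb->and distinguishes the two cases: negative for the
 * LUT, otherwise it doubles as the hash-bucket mask.
 */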
static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}

static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static struct i915_vma *
eb_get_batch(struct eb_vmas *eb)
{
	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
		vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return vma;
}

static int
eb_lookup_vmas(struct eb_vmas *eb,
	       struct drm_i915_gem_exec_object2 *exec,
	       const struct drm_i915_gem_execbuffer2 *args,
	       struct i915_address_space *vm,
	       struct drm_file *file)
{
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret;

	INIT_LIST_HEAD(&objects);
	spin_lock(&file->table_lock);
	/* Grab a reference to the object and release the lock so we can look up
	 * or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				   exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				   obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		i915_gem_object_get(obj);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&file->table_lock);

	i = 0;
	while (!list_empty(&objects)) {
		struct i915_vma *vma;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_gem_obj_lookup_or_create_vma(obj, vm, NULL);
		if (unlikely(IS_ERR(vma))) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto err;
		}

		/* Transfer ownership from the objects list to the vmas list. */
		list_add_tail(&vma->exec_list, &eb->vmas);
		list_del_init(&obj->obj_exec_link);

		vma->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}

	return 0;

err:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		i915_gem_object_put(obj);
	}
	/*
	 * Objects already transferred to the vmas list will be unreferenced by
	 * eb_destroy.
	 */

	return ret;
}

static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct i915_vma *vma;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each_entry(vma, head, exec_node) {
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry;
	struct drm_i915_gem_object *obj = vma->obj;

	if (!drm_mm_node_allocated(&vma->node))
		return;

	entry = vma->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		__i915_vma_unpin(vma);

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void eb_destroy(struct eb_vmas *eb)
{
	while (!list_empty(&eb->vmas)) {
		struct i915_vma *vma;

		vma = list_first_entry(&eb->vmas,
				       struct i915_vma,
				       exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		i915_vma_put(vma);
	}
	kfree(eb);
}

static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (HAS_LLC(obj->base.dev) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		obj->cache_level != I915_CACHE_NONE);
}

/* Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
#define GEN8_HIGH_ADDRESS_BIT 47
static inline uint64_t gen8_canonical_addr(uint64_t address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline uint64_t gen8_noncanonical_addr(uint64_t address)
{
	return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
}

static inline uint64_t
relocation_target(struct drm_i915_gem_relocation_entry *reloc,
		  uint64_t target_offset)
{
	return gen8_canonical_addr((int)reloc->delta + target_offset);
}
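
/*
 * Worked example: with GEN8_HIGH_ADDRESS_BIT == 47, an address such as
 * 0x0000800000000000 (bit 47 set) canonicalises to 0xffff800000000000,
 * while gen8_noncanonical_addr() masks it back down to the low 48 bits.
 */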

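/*
 * The reloc_cache keeps the most recently used relocation page mapped
 * (either a kmap_atomic of an object page, or an atomic iomap of the
 * GGTT aperture), so that runs of relocations landing in the same page
 * avoid repeated map/unmap cycles.
 */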
struct reloc_cache {
	void *vaddr;
	unsigned int page;
	enum { KMAP, IOMAP } type;
};

static void reloc_cache_init(struct reloc_cache *cache)
{
	cache->page = -1;
	cache->vaddr = NULL;
}

static void reloc_cache_fini(struct reloc_cache *cache)
{
	if (!cache->vaddr)
		return;

	switch (cache->type) {
	case KMAP:
		kunmap_atomic(cache->vaddr);
		break;

	case IOMAP:
		io_mapping_unmap_atomic(cache->vaddr);
		break;
	}
}

static void *reloc_kmap(struct drm_i915_gem_object *obj,
			struct reloc_cache *cache,
			int page)
{
	if (cache->page == page)
		return cache->vaddr;

	if (cache->vaddr)
		kunmap_atomic(cache->vaddr);

	cache->page = page;
	cache->vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
	cache->type = KMAP;

	return cache->vaddr;
}

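/*
 * CPU path: move the object to the CPU domain and write the relocation
 * value through a kmap of the backing page. On gen8+ the address is
 * 64 bits wide, so the upper half may spill into the following page.
 */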
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   struct reloc_cache *cache,
		   uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	uint64_t delta = relocation_target(reloc, target_offset);
	char *vaddr;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		return ret;

	vaddr = reloc_kmap(obj, cache, reloc->offset >> PAGE_SHIFT);
	*(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);

	if (INTEL_GEN(dev) >= 8) {
		page_offset += sizeof(uint32_t);
		if (page_offset == PAGE_SIZE) {
			vaddr = reloc_kmap(obj, cache, cache->page + 1);
			page_offset = 0;
		}
		*(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
	}

	return 0;
}

static void *reloc_iomap(struct drm_i915_private *i915,
			 struct reloc_cache *cache,
			 uint64_t offset)
{
	if (cache->page == offset >> PAGE_SHIFT)
		return cache->vaddr;

	if (cache->vaddr)
		io_mapping_unmap_atomic(cache->vaddr);

	cache->page = offset >> PAGE_SHIFT;
	cache->vaddr =
		io_mapping_map_atomic_wc(i915->ggtt.mappable,
					 offset & PAGE_MASK);
	cache->type = IOMAP;

	return cache->vaddr;
}

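/*
 * GTT path: pin the object into the mappable aperture and perform the
 * relocation write through an uncached iomap of the GGTT.
 */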
static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   struct reloc_cache *cache,
		   uint64_t target_offset)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_vma *vma;
	uint64_t delta = relocation_target(reloc, target_offset);
	uint64_t offset;
	void __iomem *reloc_page;
	int ret;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto unpin;

	/* Map the page containing the relocation we're going to perform.  */
	offset = vma->node.start + reloc->offset;
	reloc_page = reloc_iomap(dev_priv, cache, offset);
	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));

	if (INTEL_GEN(dev_priv) >= 8) {
		offset += sizeof(uint32_t);
		if (offset_in_page(offset) == 0)
			reloc_page = reloc_iomap(dev_priv, cache, offset);
		iowrite32(upper_32_bits(delta),
			  reloc_page + offset_in_page(offset));
	}

unpin:
	__i915_vma_unpin(vma);
	return ret;
}

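/*
 * Fallback for objects that are neither LLC-coherent nor mappable
 * through the aperture: write through a kmap and clflush around each
 * dword explicitly to keep the caches coherent.
 */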
static void
clflush_write32(void *addr, uint32_t value)
{
	/* This is not a fast path, so KISS. */
	drm_clflush_virt_range(addr, sizeof(uint32_t));
	*(uint32_t *)addr = value;
	drm_clflush_virt_range(addr, sizeof(uint32_t));
}

static int
relocate_entry_clflush(struct drm_i915_gem_object *obj,
		       struct drm_i915_gem_relocation_entry *reloc,
		       struct reloc_cache *cache,
		       uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	uint64_t delta = relocation_target(reloc, target_offset);
	char *vaddr;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	vaddr = reloc_kmap(obj, cache, reloc->offset >> PAGE_SHIFT);
	clflush_write32(vaddr + page_offset, lower_32_bits(delta));

	if (INTEL_GEN(dev) >= 8) {
		page_offset += sizeof(uint32_t);
		if (page_offset == PAGE_SIZE) {
			vaddr = reloc_kmap(obj, cache, cache->page + 1);
			page_offset = 0;
		}
		clflush_write32(vaddr + page_offset, upper_32_bits(delta));
	}

	return 0;
}

static bool object_is_idle(struct drm_i915_gem_object *obj)
{
	unsigned long active = i915_gem_object_get_active(obj);
	int idx;

	for_each_active(active, idx) {
		if (!i915_gem_active_is_idle(&obj->last_read[idx],
					     &obj->base.dev->struct_mutex))
			return false;
	}

	return true;
}

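/*
 * Apply a single relocation: validate the target handle, domains and
 * relocation offset, then patch the batch via the cheapest path
 * available (CPU, GTT or clflush) and record the new presumed_offset.
 */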
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_vmas *eb,
				   struct drm_i915_gem_relocation_entry *reloc,
				   struct reloc_cache *cache)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint64_t target_offset;
	int ret;

	/* we already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = gen8_canonical_addr(target_vma->node.start);

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
				    PIN_GLOBAL);
		if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
			return ret;
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return -EINVAL;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (pagefault_disabled() && !object_is_idle(obj))
		return -EFAULT;

	if (use_cpu_reloc(obj))
		ret = relocate_entry_cpu(obj, reloc, cache, target_offset);
	else if (obj->map_and_fenceable)
		ret = relocate_entry_gtt(obj, reloc, cache, target_offset);
	else if (static_cpu_has(X86_FEATURE_CLFLUSH))
		ret = relocate_entry_clflush(obj, reloc, cache, target_offset);
	else {
		WARN_ONCE(1, "Impossible case in relocation handling\n");
		ret = -ENODEV;
	}

	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
				 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	struct reloc_cache cache;
	int remain, ret = 0;

	user_relocs = u64_to_user_ptr(entry->relocs_ptr);
	reloc_cache_init(&cache);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]))) {
			ret = -EFAULT;
			goto out;
		}

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache);
			if (ret)
				goto out;

			if (r->presumed_offset != offset &&
			    __put_user(r->presumed_offset,
				       &user_relocs->presumed_offset)) {
				ret = -EFAULT;
				goto out;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

out:
	reloc_cache_fini(&cache);
	return ret;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
				      struct eb_vmas *eb,
				      struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	struct reloc_cache cache;
	int i, ret = 0;

	reloc_cache_init(&cache);
	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache);
		if (ret)
			break;
	}
	reloc_cache_fini(&cache);

	return ret;
}

static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
	struct i915_vma *vma;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. For in such a case, the page
	 * fault handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(vma, &eb->vmas, exec_list) {
		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}

static bool only_mappable_for_reloc(unsigned int flags)
{
	return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
		__EXEC_OBJECT_NEEDS_MAP;
}

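/*
 * Pin a single vma into the address space, honouring the placement
 * constraints (mappable, bias, fixed offset, 4G zone) carried in the
 * exec entry flags, and grab a fence register if one is required.
 */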
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
				struct intel_engine_cs *engine,
				bool *need_reloc)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	uint64_t flags;
	int ret;

	flags = PIN_USER;
	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
		flags |= PIN_GLOBAL;

	if (!drm_mm_node_allocated(&vma->node)) {
		/* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
		 * limit address to the first 4GBs for unflagged objects.
		 */
		if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
			flags |= PIN_ZONE_4G;
		if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
			flags |= PIN_GLOBAL | PIN_MAPPABLE;
		if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
			flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
		if (entry->flags & EXEC_OBJECT_PINNED)
			flags |= entry->offset | PIN_OFFSET_FIXED;
		if ((flags & PIN_MAPPABLE) == 0)
			flags |= PIN_HIGH;
	}

	ret = i915_vma_pin(vma,
			   entry->pad_to_size,
			   entry->alignment,
			   flags);
	if ((ret == -ENOSPC || ret == -E2BIG) &&
	    only_mappable_for_reloc(entry->flags))
		ret = i915_vma_pin(vma,
				   entry->pad_to_size,
				   entry->alignment,
				   flags & ~PIN_MAPPABLE);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
		ret = i915_gem_object_get_fence(obj);
		if (ret)
			return ret;

		if (i915_gem_object_pin_fence(obj))
			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	return 0;
}

static bool
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	if (entry->relocation_count == 0)
		return false;

	if (!i915_vma_is_ggtt(vma))
		return false;

	/* See also use_cpu_reloc() */
	if (HAS_LLC(vma->obj->base.dev))
		return false;

	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	return true;
}

static bool
eb_vma_misplaced(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	struct drm_i915_gem_object *obj = vma->obj;

	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
		!i915_vma_is_ggtt(vma));

	if (entry->alignment &&
	    vma->node.start & (entry->alignment - 1))
		return true;

	if (vma->node.size < entry->pad_to_size)
		return true;

	if (entry->flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	/* avoid costly ping-pong once a batch bo ended up non-mappable */
	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
		return !only_mappable_for_reloc(entry->flags);

	if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	return false;
}

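/*
 * Reserve space for every vma in the exec list, pinning already-bound
 * buffers in place and binding fresh ones; on ENOSPC the whole VM is
 * evicted once and the loop retried.
 */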
static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
			    struct list_head *vmas,
			    struct i915_gem_context *ctx,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct i915_address_space *vm;
	struct list_head ordered_vmas;
	struct list_head pinned_vmas;
	bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
	int retry;

	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

	INIT_LIST_HEAD(&ordered_vmas);
	INIT_LIST_HEAD(&pinned_vmas);
	while (!list_empty(vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(vmas, struct i915_vma, exec_list);
		obj = vma->obj;
		entry = vma->exec_entry;

		if (ctx->flags & CONTEXT_NO_ZEROMAP)
			entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

		if (!has_fenced_gpu_access)
			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
		need_fence =
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			i915_gem_object_is_tiled(obj);
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (entry->flags & EXEC_OBJECT_PINNED)
			list_move_tail(&vma->exec_list, &pinned_vmas);
		else if (need_mappable) {
			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
			list_move(&vma->exec_list, &ordered_vmas);
		} else
			list_move_tail(&vma->exec_list, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
	}
	list_splice(&ordered_vmas, vmas);
	list_splice(&pinned_vmas, vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (eb_vma_misplaced(vma))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma,
								      engine,
								      need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, vmas, exec_list) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_gem_execbuffer_reserve_vma(vma, engine,
							      need_relocs);
			if (ret)
				goto err;
		}

err:
		if (ret != -ENOSPC || retry++)
			return ret;

		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);
}

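/*
 * Slow path: drop struct_mutex, copy all relocation entries from user
 * space with pagefaults enabled, then retake the lock, rebuild the vma
 * list and process the relocations from the kernel copy.
 */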
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_engine_cs *engine,
				  struct eb_vmas *eb,
				  struct drm_i915_gem_exec_object2 *exec,
				  struct i915_gem_context *ctx)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	unsigned count = args->buffer_count;

	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->vmas)) {
		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		i915_vma_put(vma);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = u64_to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (__copy_to_user(&user_relocs[j].presumed_offset,
					   &invalid_offset,
					   sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
					  &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		int offset = vma->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
							    reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

static unsigned int eb_other_engines(struct drm_i915_gem_request *req)
{
	unsigned int mask;

	mask = ~intel_engine_flag(req->engine) & I915_BO_ACTIVE_MASK;
	mask <<= I915_BO_ACTIVE_SHIFT;

	return mask;
}

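/*
 * Flush outstanding CPU writes and synchronise against other engines
 * still using the objects, then invalidate GPU caches and TLBs before
 * the batch is executed.
 */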
static int
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
				struct list_head *vmas)
{
	const unsigned int other_rings = eb_other_engines(req);
	struct i915_vma *vma;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		if (obj->flags & other_rings) {
			ret = i915_gem_object_sync(obj, req);
			if (ret)
				return ret;
		}

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			i915_gem_clflush_object(obj, false);
	}

	/* Unconditionally flush any chipset caches (for streaming writes). */
	i915_gem_chipset_flush(req->engine->i915);

	/* Unconditionally invalidate GPU caches and TLBs. */
	return req->engine->emit_flush(req, EMIT_INVALIDATE);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	/* Kernel clipping was a DRI1 misfeature */
	if (exec->num_cliprects || exec->cliprects_ptr)
		return false;

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return false;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return false;

	return true;
}

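/*
 * Sanity-check the user-supplied exec list before taking any locks:
 * reject invalid flags, unaligned or non-canonical pinned offsets and
 * relocation counts that could overflow, and verify that the relocation
 * arrays are writable so presumed offsets can be written back.
 */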
static int
validate_exec_list(struct drm_device *dev,
		   struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
	unsigned invalid_flags;
	int i;

	/* INTERNAL flags must not overlap with external ones */
	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS);

	invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	if (USES_FULL_PPGTT(dev))
		invalid_flags |= EXEC_OBJECT_NEEDS_GTT;

	for (i = 0; i < count; i++) {
		char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & invalid_flags)
			return -EINVAL;

		/* Offset can be used as input (EXEC_OBJECT_PINNED), reject
		 * any non-page-aligned or non-canonical addresses.
		 */
		if (exec[i].flags & EXEC_OBJECT_PINNED) {
			if (exec[i].offset !=
			    gen8_canonical_addr(exec[i].offset & PAGE_MASK))
				return -EINVAL;

			/* From drm_mm perspective address space is continuous,
			 * so from this point we're always using non-canonical
			 * form internally.
			 */
			exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
		}

		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
			return -EINVAL;

		/* pad_to_size was once a reserved field, so sanitize it */
		if (exec[i].flags & EXEC_OBJECT_PAD_TO_SIZE) {
			if (offset_in_page(exec[i].pad_to_size))
				return -EINVAL;
		} else {
			exec[i].pad_to_size = 0;
		}

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915.prefault_disable)) {
			if (fault_in_multipages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}

static struct i915_gem_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
			  struct intel_engine_cs *engine, const u32 ctx_id)
{
	struct i915_gem_context *ctx = NULL;
	struct i915_ctx_hang_stats *hs;

	if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
		return ERR_PTR(-EINVAL);

	ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
	if (IS_ERR(ctx))
		return ctx;

	hs = &ctx->hang_stats;
	if (hs->banned) {
		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
		return ERR_PTR(-EIO);
	}

	return ctx;
}

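/*
 * Mark the vma (and its object) as active on the request's engine so
 * that retirement keeps a reference until the GPU is done with it.
 */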
void i915_vma_move_to_active(struct i915_vma *vma,
			     struct drm_i915_gem_request *req,
			     unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	const unsigned int idx = req->engine->id;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	obj->dirty = 1; /* be paranoid */

	/* Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we
	 * add the active reference first and queue for it to be dropped
	 * *last*.
	 */
	if (!i915_gem_object_is_active(obj))
		i915_gem_object_get(obj);
	i915_gem_object_set_active(obj, idx);
	i915_gem_active_set(&obj->last_read[idx], req);

	if (flags & EXEC_OBJECT_WRITE) {
		i915_gem_active_set(&obj->last_write, req);

		intel_fb_obj_invalidate(obj, ORIGIN_CS);

		/* update for the implicit flush after a batch */
		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE) {
		i915_gem_active_set(&obj->last_fence, req);
		if (flags & __EXEC_OBJECT_HAS_FENCE) {
			struct drm_i915_private *dev_priv = req->i915;

			list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
				       &dev_priv->mm.fence_list);
		}
	}

	i915_vma_set_active(vma, idx);
	i915_gem_active_set(&vma->last_read[idx], req);
	list_move_tail(&vma->vm_link, &vma->vm->active_list);
}

static void eb_export_fence(struct drm_i915_gem_object *obj,
			    struct drm_i915_gem_request *req,
			    unsigned int flags)
{
	struct reservation_object *resv;

	resv = i915_gem_object_get_dmabuf_resv(obj);
	if (!resv)
		return;

	/* Ignore errors from failing to allocate the new fence, we can't
	 * handle an error right now. Worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
	ww_mutex_lock(&resv->lock, NULL);
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &req->fence);
	else if (reservation_object_reserve_shared(resv) == 0)
		reservation_object_add_shared_fence(resv, &req->fence);
	ww_mutex_unlock(&resv->lock);
}

static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
				   struct drm_i915_gem_request *req)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain)
			vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
		else
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;

		i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
		eb_export_fence(obj, req, vma->exec_entry->flags);
		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

static int
i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	int ret, i;

	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	ret = intel_ring_begin(req, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

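/*
 * Run the command parser over the batch: copy it into a shadow buffer
 * from the engine's batch pool and, if accepted, splice the shadow into
 * the exec list in place of the user batch.
 */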
static struct i915_vma *
i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
			  struct drm_i915_gem_object *batch_obj,
			  struct eb_vmas *eb,
			  u32 batch_start_offset,
			  u32 batch_len,
			  bool is_master)
{
	struct drm_i915_gem_object *shadow_batch_obj;
	struct i915_vma *vma;
	int ret;

	shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
						   PAGE_ALIGN(batch_len));
	if (IS_ERR(shadow_batch_obj))
		return ERR_CAST(shadow_batch_obj);

	ret = intel_engine_cmd_parser(engine,
				      batch_obj,
				      shadow_batch_obj,
				      batch_start_offset,
				      batch_len,
				      is_master);
	if (ret) {
		if (ret == -EACCES) /* unhandled chained batch */
			vma = NULL;
		else
			vma = ERR_PTR(ret);
		goto out;
	}

	vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		goto out;

	memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));

	vma->exec_entry = shadow_exec_entry;
	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
	i915_gem_object_get(shadow_batch_obj);
	list_add_tail(&vma->exec_list, &eb->vmas);

out:
	i915_gem_object_unpin_pages(shadow_batch_obj);
	return vma;
}

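/*
 * Final submission stage: flush the objects to the GPU, switch context,
 * apply any INSTPM/SOL updates requested via the execbuffer flags and
 * emit the batchbuffer start for the request.
 */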
static int
execbuf_submit(struct i915_execbuffer_params *params,
	       struct drm_i915_gem_execbuffer2 *args,
	       struct list_head *vmas)
{
	struct drm_i915_private *dev_priv = params->request->i915;
	u64 exec_start, exec_len;
	int instp_mode;
	u32 instp_mask;
	int ret;

	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
	if (ret)
		return ret;

	ret = i915_switch_context(params->request);
	if (ret)
		return ret;

	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	instp_mask = I915_EXEC_CONSTANTS_MASK;
	switch (instp_mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (instp_mode != 0 && params->engine->id != RCS) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			return -EINVAL;
		}

		if (instp_mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev_priv)->gen < 4) {
				DRM_DEBUG("no rel constants on pre-gen4\n");
				return -EINVAL;
			}

			if (INTEL_INFO(dev_priv)->gen > 5 &&
			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
				return -EINVAL;
			}

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev_priv)->gen >= 6)
				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
		return -EINVAL;
	}

	if (params->engine->id == RCS &&
	    instp_mode != dev_priv->relative_constants_mode) {
		struct intel_ring *ring = params->request->ring;

		ret = intel_ring_begin(params->request, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(ring, INSTPM);
		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = instp_mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(params->request);
		if (ret)
			return ret;
	}

	exec_len   = args->batch_len;
	exec_start = params->batch->node.start +
		     params->args_batch_start_offset;

	if (exec_len == 0)
		exec_len = params->batch->size;

	ret = params->engine->emit_bb_start(params->request,
					    exec_start, exec_len,
					    params->dispatch_flags);
	if (ret)
		return ret;

	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);

	i915_gem_execbuffer_move_to_active(vmas, params->request);

	return 0;
}

/**
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The engine index is returned.
 */
static unsigned int
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
			 struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_engine < 0) {
		/* If not, use the ping-pong mechanism to select one. */
		mutex_lock(&dev_priv->drm.struct_mutex);
		file_priv->bsd_engine = dev_priv->mm.bsd_engine_dispatch_index;
		dev_priv->mm.bsd_engine_dispatch_index ^= 1;
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	return file_priv->bsd_engine;
}

#define I915_USER_RINGS (4)

static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
	[I915_EXEC_DEFAULT]	= RCS,
	[I915_EXEC_RENDER]	= RCS,
	[I915_EXEC_BLT]		= BCS,
	[I915_EXEC_BSD]		= VCS,
	[I915_EXEC_VEBOX]	= VECS
};

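/*
 * Map the ring selector in the execbuffer flags onto an initialised
 * engine, resolving the BSD1/BSD2 choice on hardware with two video
 * engines.
 */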
static struct intel_engine_cs *
eb_select_engine(struct drm_i915_private *dev_priv,
		 struct drm_file *file,
		 struct drm_i915_gem_execbuffer2 *args)
{
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
	struct intel_engine_cs *engine;

	if (user_ring_id > I915_USER_RINGS) {
		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
		return NULL;
	}

	if ((user_ring_id != I915_EXEC_BSD) &&
	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
		DRM_DEBUG("execbuf with non bsd ring but with invalid "
			  "bsd dispatch flags: %d\n", (int)(args->flags));
		return NULL;
	}

	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
			bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
			bsd_idx--;
		} else {
			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
				  bsd_idx);
			return NULL;
		}

		engine = &dev_priv->engine[_VCS(bsd_idx)];
	} else {
		engine = &dev_priv->engine[user_ring_map[user_ring_id]];
	}

	if (!intel_engine_initialized(engine)) {
		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
		return NULL;
	}

	return engine;
}

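/*
 * Main execbuffer pipeline: validate the request, look up and reserve
 * all buffers, apply relocations (falling back to the slow path on
 * EFAULT), optionally parse the batch, then allocate a request and
 * submit it.
 */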
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct eb_vmas *eb;
	struct drm_i915_gem_exec_object2 shadow_exec_entry;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	struct i915_address_space *vm;
	struct i915_execbuffer_params params_master; /* XXX: will be removed later */
	struct i915_execbuffer_params *params = &params_master;
	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 dispatch_flags;
	int ret;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(dev, exec, args->buffer_count);
	if (ret)
		return ret;

	dispatch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		dispatch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		dispatch_flags |= I915_DISPATCH_PINNED;

	engine = eb_select_engine(dev_priv, file, args);
	if (!engine)
		return -EINVAL;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
		if (!HAS_RESOURCE_STREAMER(dev)) {
			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
			return -EINVAL;
		}
		if (engine->id != RCS) {
			DRM_DEBUG("RS is not available on %s\n",
				  engine->name);
			return -EINVAL;
		}

		dispatch_flags |= I915_DISPATCH_RS;
	}

	/* Take a local wakeref for preparing to dispatch the execbuf as
	 * we expect to access the hardware fairly frequently in the
	 * process. Upon first dispatch, we acquire another prolonged
	 * wakeref that we hold until the GPU has been idle for at least
	 * 100ms.
	 */
	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		ret = PTR_ERR(ctx);
		goto pre_mutex_err;
	}

	i915_gem_context_get(ctx);

	if (ctx->ppgtt)
		vm = &ctx->ppgtt->base;
	else
		vm = &ggtt->base;

	memset(&params_master, 0x00, sizeof(params_master));

	eb = eb_create(args);
	if (eb == NULL) {
		i915_gem_context_put(ctx);
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	params->batch = eb_get_batch(eb);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
					  &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
								engine,
								eb, exec, ctx);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (params->batch->obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}

	params->args_batch_start_offset = args->batch_start_offset;
	if (intel_engine_needs_cmd_parser(engine) && args->batch_len) {
		struct i915_vma *vma;

		vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry,
						params->batch->obj,
						eb,
						args->batch_start_offset,
						args->batch_len,
						drm_is_current_master(file));
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto err;
		}

		if (vma) {
			/*
			 * Batch parsed and accepted:
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
			 * bit from MI_BATCH_BUFFER_START commands issued in
			 * the dispatch_execbuffer implementations. We
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
			dispatch_flags |= I915_DISPATCH_SECURE;
			params->args_batch_start_offset = 0;
			params->batch = vma;
		}
	}

	params->batch->obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (dispatch_flags & I915_DISPATCH_SECURE) {
		struct drm_i915_gem_object *obj = params->batch->obj;
		struct i915_vma *vma;

		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto err;
		}

		params->batch = vma;
	}

	/* Allocate a request for this batch buffer nice and early. */
	params->request = i915_gem_request_alloc(engine, ctx);
	if (IS_ERR(params->request)) {
		ret = PTR_ERR(params->request);
		goto err_batch_unpin;
	}

	/* Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	params->request->batch = params->batch;

	ret = i915_gem_request_add_to_client(params->request, file);
	if (ret)
		goto err_request;

	/*
	 * Save assorted stuff away to pass through to *_submission().
	 * NB: This data should be 'persistent' and not local as it will be
	 * kept around beyond the duration of the IOCTL once the GPU
	 * scheduler arrives.
	 */
	params->dev                     = dev;
	params->file                    = file;
	params->engine                  = engine;
	params->dispatch_flags          = dispatch_flags;
	params->ctx                     = ctx;

	ret = execbuf_submit(params, args, &eb->vmas);
err_request:
	__i915_add_request(params->request, ret == 0);

err_batch_unpin:
	/*
	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
	 * batch vma for correctness. For less ugliness and fragility this
	 * needs to be adjusted to also track the ggtt batch vma properly as
	 * active.
	 */
	if (dispatch_flags & I915_DISPATCH_SECURE)
		i915_vma_unpin(params->batch);
err:
	/* the request owns the ref now */
	i915_gem_context_put(ctx);
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	/* intel_gpu_busy should also get a ref, so it will free when the device
	 * is really idle. */
	intel_runtime_pm_put(dev_priv);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user (%d)\n",
					  args->buffer_count, ret);
				break;
			}
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->rsvd2 != 0) {
		DRM_DEBUG("dirty rsvd2 field\n");
		return -EINVAL;
	}

	exec2_list = drm_malloc_gfp(args->buffer_count,
				    sizeof(*exec2_list),
				    GFP_TEMPORARY);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
				   u64_to_user_ptr(args->buffers_ptr);
		int i;

		for (i = 0; i < args->buffer_count; i++) {
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user\n",
					  args->buffer_count);
				break;
			}
		}
	}

	drm_free_large(exec2_list);
	return ret;
}