i915_gem.c 147.9 KB
Newer Older
1
/*
2
 * Copyright © 2008-2015 Intel Corporation
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

28
#include <drm/drmP.h>
29
#include <drm/drm_vma_manager.h>
30
#include <drm/i915_drm.h>
31
#include "i915_drv.h"
32
#include "i915_gem_clflush.h"
33
#include "i915_vgpu.h"
C
Chris Wilson 已提交
34
#include "i915_trace.h"
35
#include "intel_drv.h"
36
#include "intel_frontbuffer.h"
37
#include "intel_mocs.h"
M
Matthew Auld 已提交
38
#include "i915_gemfs.h"
39
#include <linux/dma-fence-array.h>
40
#include <linux/kthread.h>
41
#include <linux/reservation.h>
42
#include <linux/shmem_fs.h>
43
#include <linux/slab.h>
44
#include <linux/stop_machine.h>
45
#include <linux/swap.h>
J
Jesse Barnes 已提交
46
#include <linux/pci.h>
47
#include <linux/dma-buf.h>
48

49
static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
50

51 52
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
53
	if (obj->cache_dirty)
54 55
		return false;

56
	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
57 58
		return true;

59
	return obj->pin_global; /* currently in use by HW, keep flushed */
60 61
}

62
static int
63
insert_mappable_node(struct i915_ggtt *ggtt,
64 65 66
                     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
67 68 69 70
	return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
					   size, 0, I915_COLOR_UNEVICTABLE,
					   0, ggtt->mappable_end,
					   DRM_MM_INSERT_LOW);
71 72 73 74 75 76 77 78
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

79 80
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
81
				  u64 size)
82
{
83
	spin_lock(&dev_priv->mm.object_stat_lock);
84 85
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
86
	spin_unlock(&dev_priv->mm.object_stat_lock);
87 88 89
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
90
				     u64 size)
91
{
92
	spin_lock(&dev_priv->mm.object_stat_lock);
93 94
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
95
	spin_unlock(&dev_priv->mm.object_stat_lock);
96 97
}

98
static int
99
i915_gem_wait_for_error(struct i915_gpu_error *error)
100 101 102
{
	int ret;

103 104
	might_sleep();

105 106 107 108 109
	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
110
	ret = wait_event_interruptible_timeout(error->reset_queue,
111
					       !i915_reset_backoff(error),
112
					       I915_RESET_TIMEOUT);
113 114 115 116
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
117
		return ret;
118 119
	} else {
		return 0;
120
	}
121 122
}

123
int i915_mutex_lock_interruptible(struct drm_device *dev)
124
{
125
	struct drm_i915_private *dev_priv = to_i915(dev);
126 127
	int ret;

128
	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
129 130 131 132 133 134 135 136 137
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	return 0;
}
138

139 140
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
141
			    struct drm_file *file)
142
{
143
	struct drm_i915_private *dev_priv = to_i915(dev);
144
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
145
	struct drm_i915_gem_get_aperture *args = data;
146
	struct i915_vma *vma;
147
	u64 pinned;
148

149
	pinned = ggtt->base.reserved;
150
	mutex_lock(&dev->struct_mutex);
151
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
152
		if (i915_vma_is_pinned(vma))
153
			pinned += vma->node.size;
154
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
155
		if (i915_vma_is_pinned(vma))
156
			pinned += vma->node.size;
157
	mutex_unlock(&dev->struct_mutex);
158

159
	args->aper_size = ggtt->base.total;
160
	args->aper_available_size = args->aper_size - pinned;
161

162 163 164
	return 0;
}

165
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
166
{
167
	struct address_space *mapping = obj->base.filp->f_mapping;
168
	drm_dma_handle_t *phys;
169 170
	struct sg_table *st;
	struct scatterlist *sg;
171
	char *vaddr;
172
	int i;
173
	int err;
174

175
	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
176
		return -EINVAL;
177

178 179 180 181 182
	/* Always aligning to the object size, allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	phys = drm_pci_alloc(obj->base.dev,
183
			     roundup_pow_of_two(obj->base.size),
184 185
			     roundup_pow_of_two(obj->base.size));
	if (!phys)
186
		return -ENOMEM;
187 188

	vaddr = phys->vaddr;
189 190 191 192 193
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
194
		if (IS_ERR(page)) {
195
			err = PTR_ERR(page);
196 197
			goto err_phys;
		}
198 199 200 201 202 203

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

204
		put_page(page);
205 206 207
		vaddr += PAGE_SIZE;
	}

208
	i915_gem_chipset_flush(to_i915(obj->base.dev));
209 210

	st = kmalloc(sizeof(*st), GFP_KERNEL);
211
	if (!st) {
212
		err = -ENOMEM;
213 214
		goto err_phys;
	}
215 216 217

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
218
		err = -ENOMEM;
219
		goto err_phys;
220 221 222 223 224
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;
225

226
	sg_dma_address(sg) = phys->busaddr;
227 228
	sg_dma_len(sg) = obj->base.size;

229
	obj->phys_handle = phys;
230

231
	__i915_gem_object_set_pages(obj, st, sg->length);
232 233

	return 0;
234 235 236

err_phys:
	drm_pci_free(obj->base.dev, phys);
237 238

	return err;
239 240
}

241 242 243 244 245 246 247 248
static void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

249
static void
250
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
251 252
				struct sg_table *pages,
				bool needs_clflush)
253
{
C
Chris Wilson 已提交
254
	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
255

C
Chris Wilson 已提交
256 257
	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;
258

259 260
	if (needs_clflush &&
	    (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
261
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
262
		drm_clflush_sg(pages);
263

264
	__start_cpu_write(obj);
265 266 267 268 269 270
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
271
	__i915_gem_object_release_shmem(obj, pages, false);
272

C
Chris Wilson 已提交
273
	if (obj->mm.dirty) {
274
		struct address_space *mapping = obj->base.filp->f_mapping;
275
		char *vaddr = obj->phys_handle->vaddr;
276 277 278
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
279 280 281 282 283 284 285 286 287 288 289 290 291
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
C
Chris Wilson 已提交
292
			if (obj->mm.madv == I915_MADV_WILLNEED)
293
				mark_page_accessed(page);
294
			put_page(page);
295 296
			vaddr += PAGE_SIZE;
		}
C
Chris Wilson 已提交
297
		obj->mm.dirty = false;
298 299
	}

300 301
	sg_free_table(pages);
	kfree(pages);
302 303

	drm_pci_free(obj->base.dev, obj->phys_handle);
304 305 306 307 308
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
C
Chris Wilson 已提交
309
	i915_gem_object_unpin_pages(obj);
310 311 312 313 314 315 316 317
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

318 319
static const struct drm_i915_gem_object_ops i915_gem_object_ops;

320
int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
321 322 323
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
324 325 326
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);
327

328 329 330 331
	/* Closed vma are removed from the obj->vma_list - but they may
	 * still have an active binding on the object. To remove those we
	 * must wait for all rendering to complete to the object (as unbinding
	 * must anyway), and retire the requests.
332
	 */
333 334 335 336 337 338
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
339 340 341 342 343
	if (ret)
		return ret;

	i915_gem_retire_requests(to_i915(obj->base.dev));

344 345 346 347 348 349 350 351 352 353 354 355 356
	while ((vma = list_first_entry_or_null(&obj->vma_list,
					       struct i915_vma,
					       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
		if (ret)
			break;
	}
	list_splice(&still_in_list, &obj->vma_list);

	return ret;
}

357 358 359 360
static long
i915_gem_object_wait_fence(struct dma_fence *fence,
			   unsigned int flags,
			   long timeout,
361
			   struct intel_rps_client *rps_client)
362
{
363
	struct drm_i915_gem_request *rq;
364

365
	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
366

367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	if (!dma_fence_is_i915(fence))
		return dma_fence_wait_timeout(fence,
					      flags & I915_WAIT_INTERRUPTIBLE,
					      timeout);

	rq = to_request(fence);
	if (i915_gem_request_completed(rq))
		goto out;

	/* This client is about to stall waiting for the GPU. In many cases
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we wait. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the workload
	 * (bad for battery). Not all clients even want their results
	 * immediately and for them we should just let the GPU select its own
	 * frequency to maximise efficiency. To prevent a single client from
	 * forcing the clocks too high for the whole system, we only allow
	 * each client to waitboost once in a busy period.
	 */
394
	if (rps_client) {
395
		if (INTEL_GEN(rq->i915) >= 6)
396
			gen6_rps_boost(rq, rps_client);
397
		else
398
			rps_client = NULL;
399 400
	}

401 402 403 404 405 406 407 408 409 410 411 412 413
	timeout = i915_wait_request(rq, flags, timeout);

out:
	if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
		i915_gem_request_retire_upto(rq);

	return timeout;
}

static long
i915_gem_object_wait_reservation(struct reservation_object *resv,
				 unsigned int flags,
				 long timeout,
414
				 struct intel_rps_client *rps_client)
415
{
416
	unsigned int seq = __read_seqcount_begin(&resv->seq);
417
	struct dma_fence *excl;
418
	bool prune_fences = false;
419 420 421 422

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
423 424
		int ret;

425 426
		ret = reservation_object_get_fences_rcu(resv,
							&excl, &count, &shared);
427 428 429
		if (ret)
			return ret;

430 431 432
		for (i = 0; i < count; i++) {
			timeout = i915_gem_object_wait_fence(shared[i],
							     flags, timeout,
433
							     rps_client);
434
			if (timeout < 0)
435
				break;
436

437 438 439 440 441 442
			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
443 444

		prune_fences = count && timeout >= 0;
445 446
	} else {
		excl = reservation_object_get_excl_rcu(resv);
447 448
	}

449
	if (excl && timeout >= 0) {
450 451
		timeout = i915_gem_object_wait_fence(excl, flags, timeout,
						     rps_client);
452 453
		prune_fences = timeout >= 0;
	}
454 455 456

	dma_fence_put(excl);

457 458 459 460
	/* Oportunistically prune the fences iff we know they have *all* been
	 * signaled and that the reservation object has not been changed (i.e.
	 * no new fences have been added).
	 */
461
	if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
462 463 464 465 466
		if (reservation_object_trylock(resv)) {
			if (!__read_seqcount_retry(&resv->seq, seq))
				reservation_object_add_excl_fence(resv, NULL);
			reservation_object_unlock(resv);
		}
467 468
	}

469
	return timeout;
470 471
}

472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535
static void __fence_set_priority(struct dma_fence *fence, int prio)
{
	struct drm_i915_gem_request *rq;
	struct intel_engine_cs *engine;

	if (!dma_fence_is_i915(fence))
		return;

	rq = to_request(fence);
	engine = rq->engine;
	if (!engine->schedule)
		return;

	engine->schedule(rq, prio);
}

static void fence_set_priority(struct dma_fence *fence, int prio)
{
	/* Recurse once into a fence-array */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);
		int i;

		for (i = 0; i < array->num_fences; i++)
			__fence_set_priority(array->fences[i], prio);
	} else {
		__fence_set_priority(fence, prio);
	}
}

int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
			      unsigned int flags,
			      int prio)
{
	struct dma_fence *excl;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			fence_set_priority(shared[i], prio);
			dma_fence_put(shared[i]);
		}

		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		fence_set_priority(excl, prio);
		dma_fence_put(excl);
	}
	return 0;
}

536 537 538 539 540 541
/**
 * Waits for rendering to the object to be completed
 * @obj: i915 gem object
 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
 * @timeout: how long to wait
 * @rps: client (user process) to charge for any waitboosting
542
 */
543 544 545 546
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
		     unsigned int flags,
		     long timeout,
547
		     struct intel_rps_client *rps_client)
548
{
549 550 551 552 553 554 555
	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);
556

557 558
	timeout = i915_gem_object_wait_reservation(obj->resv,
						   flags, timeout,
559
						   rps_client);
560
	return timeout < 0 ? timeout : 0;
561 562 563 564 565 566
}

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;

567
	return &fpriv->rps_client;
568 569
}

570 571 572
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
573
		     struct drm_file *file)
574 575
{
	void *vaddr = obj->phys_handle->vaddr + args->offset;
576
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
577 578 579 580

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
581
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
582 583
	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;
584

585
	drm_clflush_virt_range(vaddr, args->size);
586
	i915_gem_chipset_flush(to_i915(obj->base.dev));
587

588
	intel_fb_obj_flush(obj, ORIGIN_CPU);
589
	return 0;
590 591
}

592
void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
593
{
594
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
595 596 597 598
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
599
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
600
	kmem_cache_free(dev_priv->objects, obj);
601 602
}

603 604
static int
i915_gem_create(struct drm_file *file,
605
		struct drm_i915_private *dev_priv,
606 607
		uint64_t size,
		uint32_t *handle_p)
608
{
609
	struct drm_i915_gem_object *obj;
610 611
	int ret;
	u32 handle;
612

613
	size = roundup(size, PAGE_SIZE);
614 615
	if (size == 0)
		return -EINVAL;
616 617

	/* Allocate the new object */
618
	obj = i915_gem_object_create(dev_priv, size);
619 620
	if (IS_ERR(obj))
		return PTR_ERR(obj);
621

622
	ret = drm_gem_handle_create(file, &obj->base, &handle);
623
	/* drop reference from allocate - handle holds it now */
C
Chris Wilson 已提交
624
	i915_gem_object_put(obj);
625 626
	if (ret)
		return ret;
627

628
	*handle_p = handle;
629 630 631
	return 0;
}

632 633 634 635 636 637
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
638
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
639
	args->size = args->pitch * args->height;
640
	return i915_gem_create(file, to_i915(dev),
641
			       args->size, &args->handle);
642 643
}

644 645 646 647 648 649
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	return !(obj->cache_level == I915_CACHE_NONE ||
		 obj->cache_level == I915_CACHE_WT);
}

650 651
/**
 * Creates a new mm object and returns a handle to it.
652 653 654
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
655 656 657 658 659
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
660
	struct drm_i915_private *dev_priv = to_i915(dev);
661
	struct drm_i915_gem_create *args = data;
662

663
	i915_gem_flush_free_objects(dev_priv);
664

665
	return i915_gem_create(file, dev_priv,
666
			       args->size, &args->handle);
667 668
}

669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702
static inline enum fb_op_origin
fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
{
	return (domain == I915_GEM_DOMAIN_GTT ?
		obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}

static void
flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

	if (!(obj->base.write_domain & flush_domains))
		return;

	/* No actual flushing is required for the GTT write domain.  Writes
	 * to it "immediately" go to main memory as far as we know, so there's
	 * no chipset flush.  It also doesn't land in render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour).
	 */
	wmb();

	switch (obj->base.write_domain) {
	case I915_GEM_DOMAIN_GTT:
703
		if (!HAS_LLC(dev_priv)) {
704 705
			intel_runtime_pm_get(dev_priv);
			spin_lock_irq(&dev_priv->uncore.lock);
706
			POSTING_READ_FW(RING_HEAD(dev_priv->engine[RCS]->mmio_base));
707 708
			spin_unlock_irq(&dev_priv->uncore.lock);
			intel_runtime_pm_put(dev_priv);
709 710 711 712 713 714 715 716 717
		}

		intel_fb_obj_flush(obj,
				   fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
		break;

	case I915_GEM_DOMAIN_CPU:
		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
		break;
718 719 720 721 722

	case I915_GEM_DOMAIN_RENDER:
		if (gpu_write_needs_clflush(obj))
			obj->cache_dirty = true;
		break;
723 724 725 726 727
	}

	obj->base.write_domain = 0;
}

728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

754
static inline int
755 756
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

780 781 782 783 784 785
/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
786
				    unsigned int *needs_clflush)
787 788 789
{
	int ret;

790
	lockdep_assert_held(&obj->base.dev->struct_mutex);
791

792
	*needs_clflush = 0;
793 794
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;
795

796 797 798 799 800
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
801 802 803
	if (ret)
		return ret;

C
Chris Wilson 已提交
804
	ret = i915_gem_object_pin_pages(obj);
805 806 807
	if (ret)
		return ret;

808 809
	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
810 811 812 813 814 815 816
		ret = i915_gem_object_set_to_cpu_domain(obj, false);
		if (ret)
			goto err_unpin;
		else
			goto out;
	}

817
	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
818

819 820 821 822 823
	/* If we're not in the cpu read domain, set ourself into the gtt
	 * read domain and manually flush cachelines (if required). This
	 * optimizes for the case when the gpu will dirty the data
	 * anyway again before the next pread happens.
	 */
824 825
	if (!obj->cache_dirty &&
	    !(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
826
		*needs_clflush = CLFLUSH_BEFORE;
827

828
out:
829
	/* return with the pages pinned */
830
	return 0;
831 832 833 834

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
835 836 837 838 839 840 841
}

int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
				     unsigned int *needs_clflush)
{
	int ret;

842 843
	lockdep_assert_held(&obj->base.dev->struct_mutex);

844 845 846 847
	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

848 849 850 851 852 853
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
854 855 856
	if (ret)
		return ret;

C
Chris Wilson 已提交
857
	ret = i915_gem_object_pin_pages(obj);
858 859 860
	if (ret)
		return ret;

861 862
	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
863 864 865 866 867 868 869
		ret = i915_gem_object_set_to_cpu_domain(obj, true);
		if (ret)
			goto err_unpin;
		else
			goto out;
	}

870
	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
871

872 873 874 875 876
	/* If we're not in the cpu write domain, set ourself into the
	 * gtt write domain and manually flush cachelines (as required).
	 * This optimizes for the case when the gpu will use the data
	 * right away and we therefore have to clflush anyway.
	 */
877
	if (!obj->cache_dirty) {
878
		*needs_clflush |= CLFLUSH_AFTER;
879

880 881 882 883 884 885 886
		/*
		 * Same trick applies to invalidate partially written
		 * cachelines read before writing.
		 */
		if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
			*needs_clflush |= CLFLUSH_BEFORE;
	}
887

888
out:
889
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
C
Chris Wilson 已提交
890
	obj->mm.dirty = true;
891
	/* return with the pages pinned */
892
	return 0;
893 894 895 896

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
897 898
}

899 900 901 902
static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
903
	if (unlikely(swizzled)) {
904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}

}

921 922 923
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
924
shmem_pread_slow(struct page *page, int offset, int length,
925 926 927 928 929 930 931 932
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
933
		shmem_clflush_swizzled_range(vaddr + offset, length,
934
					     page_do_bit17_swizzling);
935 936

	if (page_do_bit17_swizzling)
937
		ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
938
	else
939
		ret = __copy_to_user(user_data, vaddr + offset, length);
940 941
	kunmap(page);

942
	return ret ? - EFAULT : 0;
943 944
}

945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020
static int
shmem_pread(struct page *page, int offset, int length, char __user *user_data,
	    bool page_do_bit17_swizzling, bool needs_clflush)
{
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush)
			drm_clflush_virt_range(vaddr + offset, length);
		ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return 0;

	return shmem_pread_slow(page, offset, length, user_data,
				page_do_bit17_swizzling, needs_clflush);
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	char __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int needs_clflush;
	unsigned int idx, offset;
	int ret;

	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);

	ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	mutex_unlock(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		int length;

		length = remain;
		if (offset + length > PAGE_SIZE)
			length = PAGE_SIZE - offset;

		ret = shmem_pread(page, offset, length, user_data,
				  page_to_phys(page) & obj_do_bit17_swizzling,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_obj_finish_shmem_access(obj);
	return ret;
}

static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
1021
{
1022
	void __iomem *vaddr;
1023
	unsigned long unwritten;
1024 1025

	/* We can use the cpu mem copy function because this is X86. */
1026 1027 1028 1029
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
1030 1031
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
1032 1033 1034 1035
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
1036 1037
		io_mapping_unmap(vaddr);
	}
1038 1039 1040 1041
	return unwritten;
}

static int
1042 1043
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
1044
{
1045 1046
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
1047
	struct drm_mm_node node;
1048 1049 1050
	struct i915_vma *vma;
	void __user *user_data;
	u64 remain, offset;
1051 1052
	int ret;

1053 1054 1055 1056 1057 1058
	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(i915);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1059 1060 1061
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
1062 1063 1064
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
1065
		ret = i915_vma_put_fence(vma);
1066 1067 1068 1069 1070
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
C
Chris Wilson 已提交
1071
	if (IS_ERR(vma)) {
1072
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1073
		if (ret)
1074 1075
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
1076 1077 1078 1079 1080 1081
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret)
		goto out_unpin;

1082
	mutex_unlock(&i915->drm.struct_mutex);
1083

1084 1085 1086
	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;
1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1103
					       node.start, I915_CACHE_NONE, 0);
1104 1105 1106 1107
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}
1108 1109 1110

		if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
				  user_data, page_length)) {
1111 1112 1113 1114 1115 1116 1117 1118 1119
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

1120
	mutex_lock(&i915->drm.struct_mutex);
1121 1122 1123 1124
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
1125
				       node.start, node.size);
1126 1127
		remove_mappable_node(&node);
	} else {
C
Chris Wilson 已提交
1128
		i915_vma_unpin(vma);
1129
	}
1130 1131 1132
out_unlock:
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
1133

1134 1135 1136
	return ret;
}

1137 1138
/**
 * Reads data from the object referenced by handle.
1139 1140 1141
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
1142 1143 1144 1145 1146
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1147
		     struct drm_file *file)
1148 1149
{
	struct drm_i915_gem_pread *args = data;
1150
	struct drm_i915_gem_object *obj;
1151
	int ret;
1152

1153 1154 1155 1156
	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
1157
		       u64_to_user_ptr(args->data_ptr),
1158 1159 1160
		       args->size))
		return -EFAULT;

1161
	obj = i915_gem_object_lookup(file, args->handle);
1162 1163
	if (!obj)
		return -ENOENT;
1164

1165
	/* Bounds check source.  */
1166
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
C
Chris Wilson 已提交
1167
		ret = -EINVAL;
1168
		goto out;
C
Chris Wilson 已提交
1169 1170
	}

C
Chris Wilson 已提交
1171 1172
	trace_i915_gem_object_pread(obj, args->offset, args->size);

1173 1174 1175 1176
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
1177
	if (ret)
1178
		goto out;
1179

1180
	ret = i915_gem_object_pin_pages(obj);
1181
	if (ret)
1182
		goto out;
1183

1184
	ret = i915_gem_shmem_pread(obj, args);
1185
	if (ret == -EFAULT || ret == -ENODEV)
1186
		ret = i915_gem_gtt_pread(obj, args);
1187

1188 1189
	i915_gem_object_unpin_pages(obj);
out:
C
Chris Wilson 已提交
1190
	i915_gem_object_put(obj);
1191
	return ret;
1192 1193
}

1194 1195
/* This is the fast write path which cannot handle
 * page faults in the source data
1196
 */
1197

1198 1199 1200 1201
static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
1202
{
1203
	void __iomem *vaddr;
1204
	unsigned long unwritten;
1205

1206
	/* We can use the cpu mem copy function because this is X86. */
1207 1208
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
1209
						      user_data, length);
1210 1211
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
1212 1213 1214
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
1215 1216
		io_mapping_unmap(vaddr);
	}
1217 1218 1219 1220

	return unwritten;
}

1221 1222 1223
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
1224
 * @obj: i915 GEM object
1225
 * @args: pwrite arguments structure
1226
 */
1227
static int
1228 1229
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
1230
{
1231
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
1232 1233
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_mm_node node;
1234 1235 1236
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
1237
	int ret;
1238

1239 1240 1241
	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;
D
Daniel Vetter 已提交
1242

1243
	intel_runtime_pm_get(i915);
C
Chris Wilson 已提交
1244
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1245 1246 1247
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
1248 1249 1250
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
1251
		ret = i915_vma_put_fence(vma);
1252 1253 1254 1255 1256
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
C
Chris Wilson 已提交
1257
	if (IS_ERR(vma)) {
1258
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1259
		if (ret)
1260 1261
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
1262
	}
D
Daniel Vetter 已提交
1263 1264 1265 1266 1267

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

1268 1269
	mutex_unlock(&i915->drm.struct_mutex);

1270
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
1271

1272 1273 1274 1275
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
1276 1277
		/* Operation in this page
		 *
1278 1279 1280
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
1281
		 */
1282
		u32 page_base = node.start;
1283 1284
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
1285 1286 1287 1288 1289 1290 1291 1292 1293 1294
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
1295
		/* If we get a fault while copying data, then (presumably) our
1296 1297
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
1298 1299
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
1300
		 */
1301 1302 1303 1304
		if (ggtt_write(&ggtt->mappable, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
D
Daniel Vetter 已提交
1305
		}
1306

1307 1308 1309
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
1310
	}
1311
	intel_fb_obj_flush(obj, ORIGIN_CPU);
1312 1313

	mutex_lock(&i915->drm.struct_mutex);
D
Daniel Vetter 已提交
1314
out_unpin:
1315 1316 1317
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
1318
				       node.start, node.size);
1319 1320
		remove_mappable_node(&node);
	} else {
C
Chris Wilson 已提交
1321
		i915_vma_unpin(vma);
1322
	}
1323
out_unlock:
1324
	intel_runtime_pm_put(i915);
1325
	mutex_unlock(&i915->drm.struct_mutex);
1326
	return ret;
1327 1328
}

1329
static int
1330
shmem_pwrite_slow(struct page *page, int offset, int length,
1331 1332 1333 1334
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
1335
{
1336 1337
	char *vaddr;
	int ret;
1338

1339
	vaddr = kmap(page);
1340
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
1341
		shmem_clflush_swizzled_range(vaddr + offset, length,
1342
					     page_do_bit17_swizzling);
1343
	if (page_do_bit17_swizzling)
1344 1345
		ret = __copy_from_user_swizzled(vaddr, offset, user_data,
						length);
1346
	else
1347
		ret = __copy_from_user(vaddr + offset, user_data, length);
1348
	if (needs_clflush_after)
1349
		shmem_clflush_swizzled_range(vaddr + offset, length,
1350
					     page_do_bit17_swizzling);
1351
	kunmap(page);
1352

1353
	return ret ? -EFAULT : 0;
1354 1355
}

1356 1357 1358 1359 1360
/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
1361
static int
1362 1363 1364 1365
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool page_do_bit17_swizzling,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
1366
{
1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush_before)
			drm_clflush_virt_range(vaddr + offset, len);
		ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
		if (needs_clflush_after)
			drm_clflush_virt_range(vaddr + offset, len);

		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return ret;

	return shmem_pwrite_slow(page, offset, len, user_data,
				 page_do_bit17_swizzling,
				 needs_clflush_before,
				 needs_clflush_after);
}

static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	void __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int partial_cacheline_write;
1399
	unsigned int needs_clflush;
1400 1401
	unsigned int offset, idx;
	int ret;
1402

1403
	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1404 1405 1406
	if (ret)
		return ret;

1407 1408 1409 1410
	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
	mutex_unlock(&i915->drm.struct_mutex);
	if (ret)
		return ret;
1411

1412 1413 1414
	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);
1415

1416 1417 1418 1419 1420 1421 1422
	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire patch.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
1423

1424 1425 1426 1427 1428 1429
	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		int length;
1430

1431 1432 1433
		length = remain;
		if (offset + length > PAGE_SIZE)
			length = PAGE_SIZE - offset;
1434

1435 1436 1437 1438
		ret = shmem_pwrite(page, offset, length, user_data,
				   page_to_phys(page) & obj_do_bit17_swizzling,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
1439
		if (ret)
1440
			break;
1441

1442 1443 1444
		remain -= length;
		user_data += length;
		offset = 0;
1445
	}
1446

1447
	intel_fb_obj_flush(obj, ORIGIN_CPU);
1448
	i915_gem_obj_finish_shmem_access(obj);
1449
	return ret;
1450 1451 1452 1453
}

/**
 * Writes data to the object referenced by handle.
1454 1455 1456
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1457 1458 1459 1460 1461
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1462
		      struct drm_file *file)
1463 1464
{
	struct drm_i915_gem_pwrite *args = data;
1465
	struct drm_i915_gem_object *obj;
1466 1467 1468 1469 1470 1471
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
1472
		       u64_to_user_ptr(args->data_ptr),
1473 1474 1475
		       args->size))
		return -EFAULT;

1476
	obj = i915_gem_object_lookup(file, args->handle);
1477 1478
	if (!obj)
		return -ENOENT;
1479

1480
	/* Bounds check destination. */
1481
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
C
Chris Wilson 已提交
1482
		ret = -EINVAL;
1483
		goto err;
C
Chris Wilson 已提交
1484 1485
	}

C
Chris Wilson 已提交
1486 1487
	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

1488 1489 1490 1491 1492 1493
	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

1494 1495 1496 1497 1498
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
1499 1500 1501
	if (ret)
		goto err;

1502
	ret = i915_gem_object_pin_pages(obj);
1503
	if (ret)
1504
		goto err;
1505

D
Daniel Vetter 已提交
1506
	ret = -EFAULT;
1507 1508 1509 1510 1511 1512
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
1513
	if (!i915_gem_object_has_struct_page(obj) ||
1514
	    cpu_write_needs_clflush(obj))
D
Daniel Vetter 已提交
1515 1516
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
1517 1518
		 * textures). Fallback to the shmem path in that case.
		 */
1519
		ret = i915_gem_gtt_pwrite_fast(obj, args);
1520

1521
	if (ret == -EFAULT || ret == -ENOSPC) {
1522 1523
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
1524
		else
1525
			ret = i915_gem_shmem_pwrite(obj, args);
1526
	}
1527

1528
	i915_gem_object_unpin_pages(obj);
1529
err:
C
Chris Wilson 已提交
1530
	i915_gem_object_put(obj);
1531
	return ret;
1532 1533
}

1534 1535 1536 1537 1538 1539
static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915;
	struct list_head *list;
	struct i915_vma *vma;

1540 1541
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

1542 1543
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!i915_vma_is_ggtt(vma))
1544
			break;
1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555

		if (i915_vma_is_active(vma))
			continue;

		if (!drm_mm_node_allocated(&vma->node))
			continue;

		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	}

	i915 = to_i915(obj->base.dev);
1556
	spin_lock(&i915->mm.obj_lock);
1557
	list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
1558 1559
	list_move_tail(&obj->mm.link, list);
	spin_unlock(&i915->mm.obj_lock);
1560 1561
}

1562
/**
1563 1564
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
1565 1566 1567
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1568 1569 1570
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1571
			  struct drm_file *file)
1572 1573
{
	struct drm_i915_gem_set_domain *args = data;
1574
	struct drm_i915_gem_object *obj;
1575 1576
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
1577
	int err;
1578

1579
	/* Only handle setting domains to types used by the CPU. */
1580
	if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
1581 1582 1583 1584 1585 1586 1587 1588
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

1589
	obj = i915_gem_object_lookup(file, args->handle);
1590 1591
	if (!obj)
		return -ENOENT;
1592

1593 1594 1595 1596
	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
1597
	err = i915_gem_object_wait(obj,
1598 1599 1600 1601
				   I915_WAIT_INTERRUPTIBLE |
				   (write_domain ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
1602
	if (err)
C
Chris Wilson 已提交
1603
		goto out;
1604

1605 1606 1607 1608 1609 1610 1611 1612 1613 1614
	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	err = i915_gem_object_pin_pages(obj);
	if (err)
C
Chris Wilson 已提交
1615
		goto out;
1616 1617 1618

	err = i915_mutex_lock_interruptible(dev);
	if (err)
C
Chris Wilson 已提交
1619
		goto out_unpin;
1620

1621 1622 1623 1624
	if (read_domains & I915_GEM_DOMAIN_WC)
		err = i915_gem_object_set_to_wc_domain(obj, write_domain);
	else if (read_domains & I915_GEM_DOMAIN_GTT)
		err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
1625
	else
1626
		err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
1627

1628 1629
	/* And bump the LRU for this access */
	i915_gem_object_bump_inactive_ggtt(obj);
1630

1631
	mutex_unlock(&dev->struct_mutex);
1632

1633
	if (write_domain != 0)
1634 1635
		intel_fb_obj_invalidate(obj,
					fb_write_origin(obj, write_domain));
1636

C
Chris Wilson 已提交
1637
out_unpin:
1638
	i915_gem_object_unpin_pages(obj);
C
Chris Wilson 已提交
1639 1640
out:
	i915_gem_object_put(obj);
1641
	return err;
1642 1643 1644 1645
}

/**
 * Called when user space has done writes to this buffer
1646 1647 1648
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1649 1650 1651
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1652
			 struct drm_file *file)
1653 1654
{
	struct drm_i915_gem_sw_finish *args = data;
1655
	struct drm_i915_gem_object *obj;
1656

1657
	obj = i915_gem_object_lookup(file, args->handle);
1658 1659
	if (!obj)
		return -ENOENT;
1660 1661

	/* Pinned buffers may be scanout, so flush the cache */
1662
	i915_gem_object_flush_if_display(obj);
C
Chris Wilson 已提交
1663
	i915_gem_object_put(obj);
1664 1665

	return 0;
1666 1667 1668
}

/**
1669 1670 1671 1672 1673
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1674 1675 1676
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
1677 1678 1679 1680 1681 1682 1683 1684 1685 1686
 *
 * IMPORTANT:
 *
 * DRM driver writers who look a this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
1687 1688 1689
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1690
		    struct drm_file *file)
1691 1692
{
	struct drm_i915_gem_mmap *args = data;
1693
	struct drm_i915_gem_object *obj;
1694 1695
	unsigned long addr;

1696 1697 1698
	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

1699
	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1700 1701
		return -ENODEV;

1702 1703
	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
1704
		return -ENOENT;
1705

1706 1707 1708
	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
1709
	if (!obj->base.filp) {
C
Chris Wilson 已提交
1710
		i915_gem_object_put(obj);
1711 1712 1713
		return -EINVAL;
	}

1714
	addr = vm_mmap(obj->base.filp, 0, args->size,
1715 1716
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
1717 1718 1719 1720
	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

1721
		if (down_write_killable(&mm->mmap_sem)) {
C
Chris Wilson 已提交
1722
			i915_gem_object_put(obj);
1723 1724
			return -EINTR;
		}
1725 1726 1727 1728 1729 1730 1731
		vma = find_vma(mm, addr);
		if (vma)
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		up_write(&mm->mmap_sem);
1732 1733

		/* This may race, but that's ok, it only gets set */
1734
		WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
1735
	}
C
Chris Wilson 已提交
1736
	i915_gem_object_put(obj);
1737 1738 1739 1740 1741 1742 1743 1744
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

1745 1746
static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
{
1747
	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
1748 1749
}

1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769
/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     single byte.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
1770 1771 1772
 * 2 - Recognise WC as a separate cache domain so that we can flush the
 *     delayed writes via GTT before performing direct access via WC.
 *
1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799
 * Restrictions:
 *
 *  * snoopable objects cannot be accessed via the GTT. It can cause machine
 *    hangs on some architectures, corruption on others. An attempt to service
 *    a GTT page fault from a snoopable object will generate a SIGBUS.
 *
 *  * the object must be able to fit into RAM (physical memory, though no
 *    limited to the mappable aperture).
 *
 *
 * Caveats:
 *
 *  * a new GTT page fault will synchronize rendering from the GPU and flush
 *    all data to system memory. Subsequent access will not be synchronized.
 *
 *  * all mappings are revoked on runtime device suspend.
 *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require fence register for display and blitter access
 *    as well). Contention of the fence registers will cause the previous users
 *    to be unmapped and any new access will generate new page faults.
 *
 *  * running out of memory while servicing a fault may generate a SIGBUS,
 *    rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
1800
	return 2;
1801 1802
}

1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813
static inline struct i915_ggtt_view
compute_partial_view(struct drm_i915_gem_object *obj,
		     pgoff_t page_offset,
		     unsigned int chunk)
{
	struct i915_ggtt_view view;

	if (i915_gem_object_is_tiled(obj))
		chunk = roundup(chunk, tile_row_pages(obj));

	view.type = I915_GGTT_VIEW_PARTIAL;
1814 1815
	view.partial.offset = rounddown(page_offset, chunk);
	view.partial.size =
1816
		min_t(unsigned int, chunk,
1817
		      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
1818 1819 1820 1821 1822 1823 1824 1825

	/* If the partial covers the entire object, just create a normal VMA. */
	if (chunk >= obj->base.size >> PAGE_SHIFT)
		view.type = I915_GGTT_VIEW_NORMAL;

	return view;
}

1826 1827
/**
 * i915_gem_fault - fault a page into the GTT
1828
 * @vmf: fault info
1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839
 *
 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 *
 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
 */
int i915_gem_fault(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
	struct vm_area_struct *area = vmf->vma;
	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
	struct i915_vma *vma;
	pgoff_t page_offset;
	unsigned int flags;
	int ret;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Try to flush the object off the GPU first without holding the lock.
	 * Upon acquiring the lock, we will perform our sanity checks and then
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		goto err;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err_rpm;

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
		ret = -EFAULT;
		goto err_unlock;
	}

	/* If the object is smaller than a couple of partial vma, it is
	 * not worth only creating a single partial vma - we may as well
	 * clear enough space for the full object.
	 */
	flags = PIN_MAPPABLE;
	if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
		flags |= PIN_NONBLOCK | PIN_NONFAULT;

	/* Now pin it into the GTT as needed */
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
	if (IS_ERR(vma)) {
		/* Use a partial view if it is bigger than available space */
		struct i915_ggtt_view view =
			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);

		/* Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */
		obj->frontbuffer_ggtt_origin = ORIGIN_CPU;

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unlock;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto err_unpin;

	ret = i915_vma_pin_fence(vma);
	if (ret)
		goto err_unpin;

	/* Finally, remap it using the new GTT offset */
	ret = remap_io_mapping(area,
			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
			       (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->mappable);
	if (ret)
		goto err_fence;

	/* Mark as being mmapped into userspace for later revocation */
	assert_rpm_wakelock_held(dev_priv);
	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
		list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
	GEM_BUG_ON(!obj->userfault_count);

err_fence:
	i915_vma_unpin_fence(vma);
err_unpin:
	__i915_vma_unpin(vma);
err_unlock:
	mutex_unlock(&dev->struct_mutex);
err_rpm:
	intel_runtime_pm_put(dev_priv);
	i915_gem_object_unpin_pages(obj);
err:
	switch (ret) {
	case -EIO:
		/*
		 * We eat errors when the gpu is terminally wedged to avoid
		 * userspace unduly crashing (gl has no provisions for mmaps to
		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
		 * and so needs to be reported.
		 */
		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}
	case -EAGAIN:
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
		 */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		ret = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		ret = VM_FAULT_OOM;
		break;
	case -ENOSPC:
	case -EFAULT:
		ret = VM_FAULT_SIGBUS;
		break;
	default:
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
		ret = VM_FAULT_SIGBUS;
		break;
	}
	return ret;
}

static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	GEM_BUG_ON(!obj->userfault_count);

	obj->userfault_count = 0;
	list_del(&obj->userfault_link);
	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!i915_vma_is_ggtt(vma))
			break;

		i915_vma_unset_userfault(vma);
	}
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/* Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 *
	 * Note that RPM complicates somewhat by adding an additional
	 * requirement that operations to the GGTT be made holding the RPM
	 * wakeref.
	 */
	lockdep_assert_held(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	if (!obj->userfault_count)
		goto out;

	__i915_gem_object_release_mmap(obj);

	/* Ensure that the CPU's PTE are revoked and there are not outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied above by changing the PTE above *should* be
	 * sufficient, an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */
	wmb();

out:
	intel_runtime_pm_put(i915);
}

void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj, *on;
	int i;

	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */

	list_for_each_entry_safe(obj, on,
				 &dev_priv->mm.userfault_list, userfault_link)
		__i915_gem_object_release_mmap(obj);

	/* The fence will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];

		/* Ideally we want to assert that the fence register is not
		 * live at this point (i.e. that no piece of code will be
		 * trying to write through fence + GTT, as that both violates
		 * our tracking of activity and associated locking/barriers,
		 * but also is illegal given that the hw is powered down).
		 *
		 * Previously we used reg->pin_count as a "liveness" indicator.
		 * That is not sufficient, and we need a more fine-grained
		 * tool if we want to have a sanity check here.
		 */

		if (!reg->vma)
			continue;

		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
		reg->dirty = true;
	}
}

static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	int err;

	err = drm_gem_create_mmap_offset(&obj->base);
	if (likely(!err))
		return 0;

	/* Attempt to reap some mmap space from dead objects */
	do {
		err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
		if (err)
			break;

		i915_gem_drain_freed_objects(dev_priv);
		err = drm_gem_create_mmap_offset(&obj->base);
		if (!err)
			break;

	} while (flush_delayed_work(&dev_priv->gt.retire_work));

	return err;
}

static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
}

int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	ret = i915_gem_object_create_mmap_offset(obj);
	if (ret == 0)
		*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

	i915_gem_object_put(obj);
	return ret;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
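 *
 * As an illustrative sketch (userspace, not kernel code), the returned
 * offset is simply passed back into mmap() on the same DRM fd:
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);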
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.filp == NULL)
		return;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
	obj->mm.pages = ERR_PTR(-EFAULT);
}

/* Try to discard unwanted pages */
void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;

	lockdep_assert_held(&obj->mm.lock);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
		return;
	}

	if (obj->base.filp == NULL)
		return;

	mapping = obj->base.filp->f_mapping;
	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}

static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
			      struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	__i915_gem_object_release_shmem(obj, pages, true);

	i915_gem_gtt_finish_pages(obj, pages);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj, pages);

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty)
			set_page_dirty(page);

		if (obj->mm.madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		put_page(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);
}

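/*
 * Empty the cached page-lookup radix tree (obj->mm.get_page); it is rebuilt
 * lazily on the next lookup once new backing pages are installed.
 */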
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
}

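/*
 * Drop the object's backing pages unless they are still pinned. @subclass
 * tells lockdep which caller we are (e.g. the shrinker recursing from
 * another object's get_pages) so that taking obj->mm.lock here does not
 * raise a false-positive deadlock report.
 */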
void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
				 enum i915_mm_subclass subclass)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *pages;

	if (i915_gem_object_has_pinned_pages(obj))
		return;

	GEM_BUG_ON(obj->bind_count);
	if (!i915_gem_object_has_pages(obj))
		return;

	/* May be called by shrinker from within get_pages() (on another bo) */
	mutex_lock_nested(&obj->mm.lock, subclass);
	if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
		goto unlock;

	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early. */
	pages = fetch_and_zero(&obj->mm.pages);
	GEM_BUG_ON(!pages);

	spin_lock(&i915->mm.obj_lock);
	list_del(&obj->mm.link);
	spin_unlock(&i915->mm.obj_lock);

	if (obj->mm.mapping) {
		void *ptr;

		ptr = page_mask_bits(obj->mm.mapping);
		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
		else
			kunmap(kmap_to_page(ptr));

		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);

	if (!IS_ERR(pages))
		obj->ops->put_pages(obj, pages);

	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

unlock:
	mutex_unlock(&obj->mm.lock);
}

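/*
 * Page coalescing can leave the sg_table with fewer used entries (nents)
 * than were allocated (orig_nents); rebuild it at the exact size to return
 * the slack to the allocator. Returns true if a trimmed table replaced the
 * original.
 */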
static bool i915_sg_trim(struct sg_table *orig_st)
{
	struct sg_table new_st;
	struct scatterlist *sg, *new_sg;
	unsigned int i;

	if (orig_st->nents == orig_st->orig_nents)
		return false;

	if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
		return false;

	new_sg = new_st.sgl;
	for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, 0);
		/* called before being DMA mapped, no need to copy sg->dma_* */
		new_sg = sg_next(new_sg);
	}
	GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */

	sg_free_table(orig_st);

	*orig_st = new_st;
	return true;
}

static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	const unsigned long page_count = obj->base.size / PAGE_SIZE;
	unsigned long i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sgt_iter sgt_iter;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	unsigned int max_segment = i915_sg_segment_size();
	unsigned int sg_page_sizes;
	gfp_t noreclaim;
	int ret;

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

rebuild_st:
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping = obj->base.filp->f_mapping;
	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
	for (i = 0; i < page_count; i++) {
		const unsigned int shrink[] = {
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
			0,
		}, *s = shrink;
		gfp_t gfp = noreclaim;

		do {
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (likely(!IS_ERR(page)))
				break;

			if (!*s) {
				ret = PTR_ERR(page);
				goto err_sg;
			}

			i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
			cond_resched();

			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 *
			 * However, since graphics tend to be disposable,
			 * defer the oom here by reporting the ENOMEM back
			 * to userspace.
			 */
			if (!*s) {
				/* reclaim and warn, but no oom */
				gfp = mapping_gfp_mask(mapping);

				/* Our bo are always dirty and so we require
				 * kswapd to reclaim our pages (direct reclaim
				 * does not effectively begin pageout of our
				 * buffers on its own). However, direct reclaim
				 * only waits for kswapd when under allocation
				 * congestion. So as a result __GFP_RECLAIM is
				 * unreliable and fails to actually reclaim our
				 * dirty pages -- unless you try over and over
				 * again with !__GFP_NORETRY. However, we still
				 * want to fail this allocation rather than
				 * trigger the out-of-memory killer and for
				 * this we want __GFP_RETRY_MAYFAIL.
				 */
				gfp |= __GFP_RETRY_MAYFAIL;
			}
		} while (1);

		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
			if (i) {
				sg_page_sizes |= sg->length;
				sg = sg_next(sg);
			}
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
	}
	if (sg) { /* loop terminated early; short sg table */
		sg_page_sizes |= sg->length;
		sg_mark_end(sg);
	}

	/* Trim unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		/* DMA remapping failed? One possible cause is that
		 * it could not reserve enough large entries, asking
		 * for PAGE_SIZE chunks instead may be helpful.
		 */
		if (max_segment > PAGE_SIZE) {
			for_each_sgt_page(page, sgt_iter, st)
				put_page(page);
			sg_free_table(st);

			max_segment = PAGE_SIZE;
			goto rebuild_st;
		} else {
			dev_warn(&dev_priv->drm.pdev->dev,
				 "Failed to DMA remap %lu pages\n",
				 page_count);
			goto err_pages;
		}
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj, st);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err_sg:
	sg_mark_end(sg);
err_pages:
	for_each_sgt_page(page, sgt_iter, st)
		put_page(page);
	sg_free_table(st);
	kfree(st);

	/* shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	int i;

	lockdep_assert_held(&obj->mm.lock);

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;

	obj->mm.pages = pages;

	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(obj->mm.quirked);
		__i915_gem_object_pin_pages(obj);
		obj->mm.quirked = true;
	}

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	spin_lock(&i915->mm.obj_lock);
	list_add(&obj->mm.link, &i915->mm.unbound_list);
	spin_unlock(&i915->mm.obj_lock);
}

static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && IS_ERR_OR_NULL(obj->mm.pages));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		return err;

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			goto unlock;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
				 enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
	struct sg_table *sgt = obj->mm.pages;
	struct sgt_iter sgt_iter;
	struct page *page;
	struct page *stack_pages[32];
	struct page **pages = stack_pages;
	unsigned long i = 0;
	pgprot_t pgprot;
	void *addr;

	/* A single page can always be kmapped */
	if (n_pages == 1 && type == I915_MAP_WB)
		return kmap(sg_page(sgt->sgl));

	if (n_pages > ARRAY_SIZE(stack_pages)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return NULL;
	}

	for_each_sgt_page(page, sgt_iter, sgt)
		pages[i++] = page;

	/* Check that we have the expected number of pages */
	GEM_BUG_ON(i != n_pages);

	switch (type) {
	default:
		MISSING_CASE(type);
		/* fallthrough to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}
	addr = vmap(pages, n_pages, 0, pgprot);

	if (pages != stack_pages)
		kvfree(pages);

	return addr;
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
	int ret;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	ret = mutex_lock_interruptible(&obj->mm.lock);
	if (ret)
		return ERR_PTR(ret);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			ret = ____i915_gem_object_get_pages(obj);
			if (ret)
				goto err_unlock;

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			ret = -EBUSY;
			goto err_unpin;
		}

		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
		else
			kunmap(kmap_to_page(ptr));

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		ptr = i915_gem_object_map(obj, type);
		if (!ptr) {
			ret = -ENOMEM;
			goto err_unpin;
		}

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

out_unlock:
	mutex_unlock(&obj->mm.lock);
	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
	ptr = ERR_PTR(ret);
	goto out_unlock;
}

static int
i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
			   const struct drm_i915_gem_pwrite *arg)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
	u64 remain, offset;
	unsigned int pg;

	/* Before we instantiate/pin the backing store for our use, we
	 * can prepopulate the shmemfs filp efficiently using a write into
	 * the pagecache. We avoid the penalty of instantiating all the
	 * pages, important if the user is just writing to a few and never
	 * uses the object on the GPU, and using a direct write into shmemfs
	 * allows it to avoid the cost of retrieving a page (either swapin
	 * or clearing-before-use) before it is overwritten.
	 */
	if (i915_gem_object_has_pages(obj))
		return -ENODEV;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	/* Before the pages are instantiated the object is treated as being
	 * in the CPU domain. The pages will be clflushed as required before
	 * use, and we can freely write into the pages directly. If userspace
	 * races pwrite with any other operation; corruption will ensue -
	 * that is userspace's prerogative!
	 */

	remain = arg->size;
	offset = arg->offset;
	pg = offset_in_page(offset);

	do {
		unsigned int len, unwritten;
		struct page *page;
		void *data, *vaddr;
		int err;

		len = PAGE_SIZE - pg;
		if (len > remain)
			len = remain;

		err = pagecache_write_begin(obj->base.filp, mapping,
					    offset, len, 0,
					    &page, &data);
		if (err < 0)
			return err;

		vaddr = kmap(page);
		unwritten = copy_from_user(vaddr + pg, user_data, len);
		kunmap(page);

		err = pagecache_write_end(obj->base.filp, mapping,
					  offset, len, len - unwritten,
					  page, data);
		if (err < 0)
			return err;

		if (unwritten)
			return -EFAULT;

		remain -= len;
		user_data += len;
		offset += len;
		pg = 0;
	} while (remain);

	return 0;
}

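/*
 * A context is only banned while it is still flagged as bannable and its
 * accumulated ban score has reached CONTEXT_SCORE_BAN_THRESHOLD.
 */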
static bool ban_context(const struct i915_gem_context *ctx,
			unsigned int score)
{
	return (i915_gem_context_is_bannable(ctx) &&
		score >= CONTEXT_SCORE_BAN_THRESHOLD);
}

static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
{
	unsigned int score;
	bool banned;

	atomic_inc(&ctx->guilty_count);

	score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
	banned = ban_context(ctx, score);
	DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
			 ctx->name, score, yesno(banned));
	if (!banned)
		return;

	i915_gem_context_set_banned(ctx);
	if (!IS_ERR_OR_NULL(ctx->file_priv)) {
		atomic_inc(&ctx->file_priv->context_bans);
		DRM_DEBUG_DRIVER("client %s has had %d context banned\n",
				 ctx->name, atomic_read(&ctx->file_priv->context_bans));
	}
}

static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
{
	atomic_inc(&ctx->active_count);
}

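/*
 * Walk the engine timeline and return the oldest request that was submitted
 * to hardware but has not yet completed, i.e. the request presumed to be
 * executing (and possibly hung) when the error capture or reset code runs.
 * Returns NULL if everything submitted has already completed.
 */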
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request, *active = NULL;
	unsigned long flags;

	/* We are called by the error capture and reset at a random
	 * point in time. In particular, note that neither is crucially
	 * ordered with an interrupt. After a hang, the GPU is dead and we
	 * assume that no more writes can happen (we waited long enough for
	 * all writes that were in transaction to be flushed) - adding an
	 * extra delay for a recent interrupt is pointless. Hence, we do
	 * not need an engine->irq_seqno_barrier() before the seqno reads.
	 */
	spin_lock_irqsave(&engine->timeline->lock, flags);
	list_for_each_entry(request, &engine->timeline->requests, link) {
		if (__i915_gem_request_completed(request,
						 request->global_seqno))
			continue;

		GEM_BUG_ON(request->engine != engine);
		GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				    &request->fence.flags));

		active = request;
		break;
	}
	spin_unlock_irqrestore(&engine->timeline->lock, flags);

	return active;
}

static bool engine_stalled(struct intel_engine_cs *engine)
{
	if (!engine->hangcheck.stalled)
		return false;

	/* Check for possible seqno movement after hang declaration */
	if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) {
		DRM_DEBUG_DRIVER("%s pardoned\n", engine->name);
		return false;
	}

	return true;
}

/*
 * Ensure irq handler finishes, and not run again.
 * Also return the active request so that we only search for it once.
 */
struct drm_i915_gem_request *
i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request = NULL;

	/*
	 * During the reset sequence, we must prevent the engine from
	 * entering RC6. As the context state is undefined until we restart
	 * the engine, if it does enter RC6 during the reset, the state
	 * written to the powercontext is undefined and so we may lose
	 * GPU state upon resume, i.e. fail to restart after a reset.
	 */
	intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);

	/*
	 * Prevent the signaler thread from updating the request
	 * state (by calling dma_fence_signal) as we are processing
	 * the reset. The write from the GPU of the seqno is
	 * asynchronous and the signaler thread may see a different
	 * value to us and declare the request complete, even though
	 * the reset routine have picked that request as the active
	 * (incomplete) request. This conflict is not handled
	 * gracefully!
	 */
	kthread_park(engine->breadcrumbs.signaler);

	/*
	 * Prevent request submission to the hardware until we have
	 * completed the reset in i915_gem_reset_finish(). If a request
	 * is completed by one engine, it may then queue a request
	 * to a second via its engine->irq_tasklet *just* as we are
	 * calling engine->init_hw() and also writing the ELSP.
	 * Turning off the engine->irq_tasklet until the reset is over
	 * prevents the race.
	 */
	tasklet_kill(&engine->execlists.irq_tasklet);
	tasklet_disable(&engine->execlists.irq_tasklet);

	if (engine->irq_seqno_barrier)
		engine->irq_seqno_barrier(engine);

	request = i915_gem_find_active_request(engine);
	if (request && request->fence.error == -EIO)
		request = ERR_PTR(-EIO); /* Previous reset failed! */

	return request;
}

int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	struct drm_i915_gem_request *request;
	enum intel_engine_id id;
	int err = 0;

	for_each_engine(engine, dev_priv, id) {
		request = i915_gem_reset_prepare_engine(engine);
		if (IS_ERR(request)) {
			err = PTR_ERR(request);
			continue;
		}

		engine->hangcheck.active_request = request;
	}

	i915_gem_revoke_fences(dev_priv);

	return err;
}

static void skip_request(struct drm_i915_gem_request *request)
{
	void *vaddr = request->ring->vaddr;
	u32 head;

	/* As this request likely depends on state from the lost
	 * context, clear out all the user operations leaving the
	 * breadcrumb at the end (so we get the fence notifications).
	 */
	head = request->head;
	if (request->postfix < head) {
		memset(vaddr + head, 0, request->ring->size - head);
		head = 0;
	}
	memset(vaddr + head, 0, request->postfix - head);

	dma_fence_set_error(&request->fence, -EIO);
}

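/*
 * Cancel the rest of the hung context's workload: every request it still has
 * queued on this engine's timeline (after the guilty one) and on its own
 * per-context timeline is emptied down to the breadcrumb and marked -EIO.
 */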
static void engine_skip_context(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct i915_gem_context *hung_ctx = request->ctx;
	struct intel_timeline *timeline;
	unsigned long flags;

	timeline = i915_gem_context_lookup_timeline(hung_ctx, engine);

	spin_lock_irqsave(&engine->timeline->lock, flags);
	spin_lock(&timeline->lock);

	list_for_each_entry_continue(request, &engine->timeline->requests, link)
		if (request->ctx == hung_ctx)
			skip_request(request);

	list_for_each_entry(request, &timeline->requests, link)
		skip_request(request);

	spin_unlock(&timeline->lock);
	spin_unlock_irqrestore(&engine->timeline->lock, flags);
}

/* Returns the request if it was guilty of the hang */
static struct drm_i915_gem_request *
i915_gem_reset_request(struct intel_engine_cs *engine,
		       struct drm_i915_gem_request *request)
{
	/* The guilty request will get skipped on a hung engine.
	 *
	 * Users of client default contexts do not rely on logical
	 * state preserved between batches so it is safe to execute
	 * queued requests following the hang. Non default contexts
	 * rely on preserved state, so skipping a batch loses the
	 * evolution of the state and it needs to be considered corrupted.
	 * Executing more queued batches on top of corrupted state is
	 * risky. But we take the risk by trying to advance through
	 * the queued requests in order to make the client behaviour
	 * more predictable around resets, by not throwing away random
	 * amount of batches it has prepared for execution. Sophisticated
	 * clients can use gem_reset_stats_ioctl and dma fence status
	 * (exported via sync_file info ioctl on explicit fences) to observe
	 * when it loses the context state and should rebuild accordingly.
	 *
	 * The context ban, and ultimately the client ban, mechanism are safety
	 * valves if client submission ends up resulting in nothing more than
	 * subsequent hangs.
	 */

	if (engine_stalled(engine)) {
		i915_gem_context_mark_guilty(request->ctx);
		skip_request(request);

		/* If this context is now banned, skip all pending requests. */
		if (i915_gem_context_is_banned(request->ctx))
			engine_skip_context(request);
	} else {
		/*
		 * Since this is not the hung engine, it may have advanced
		 * since the hang declaration. Double check by refinding
		 * the active request at the time of the reset.
		 */
		request = i915_gem_find_active_request(engine);
		if (request) {
			i915_gem_context_mark_innocent(request->ctx);
			dma_fence_set_error(&request->fence, -EAGAIN);

			/* Rewind the engine to replay the incomplete rq */
			spin_lock_irq(&engine->timeline->lock);
			request = list_prev_entry(request, link);
			if (&request->link == &engine->timeline->requests)
				request = NULL;
			spin_unlock_irq(&engine->timeline->lock);
		}
	}

	return request;
}

void i915_gem_reset_engine(struct intel_engine_cs *engine,
			   struct drm_i915_gem_request *request)
{
	engine->irq_posted = 0;

	if (request)
		request = i915_gem_reset_request(engine, request);

	if (request) {
		DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
				 engine->name, request->global_seqno);
	}

	/* Setup the CS to resume from the breadcrumb of the hung request */
	engine->reset_hw(engine, request);
}

void i915_gem_reset(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	i915_gem_retire_requests(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		struct i915_gem_context *ctx;

		i915_gem_reset_engine(engine, engine->hangcheck.active_request);
		ctx = fetch_and_zero(&engine->last_retired_context);
		if (ctx)
			engine->context_unpin(engine, ctx);
	}

	i915_gem_restore_fences(dev_priv);

	if (dev_priv->gt.awake) {
		intel_sanitize_gt_powersave(dev_priv);
		intel_enable_gt_powersave(dev_priv);
		if (INTEL_GEN(dev_priv) >= 6)
			gen6_rps_busy(dev_priv);
	}
}

void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
{
	tasklet_enable(&engine->execlists.irq_tasklet);
	kthread_unpark(engine->breadcrumbs.signaler);

	intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
}

void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	for_each_engine(engine, dev_priv, id) {
		engine->hangcheck.active_request = NULL;
		i915_gem_reset_finish_engine(engine);
	}
}

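/*
 * Used while wedging the GPU: mark the request as failed (-EIO) and complete
 * the submission tracking without touching the hardware.
 * nop_complete_submit_request() below additionally advances the engine seqno
 * so that the request also reads back as completed.
 */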
static void nop_submit_request(struct drm_i915_gem_request *request)
{
	dma_fence_set_error(&request->fence, -EIO);

	i915_gem_request_submit(request);
}

static void nop_complete_submit_request(struct drm_i915_gem_request *request)
{
	unsigned long flags;

	dma_fence_set_error(&request->fence, -EIO);

	spin_lock_irqsave(&request->engine->timeline->lock, flags);
	__i915_gem_request_submit(request);
	intel_engine_init_global_seqno(request->engine, request->global_seqno);
	spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
}

void i915_gem_set_wedged(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * First, stop submission to hw, but do not yet complete requests by
	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	 */
	for_each_engine(engine, i915, id)
		engine->submit_request = nop_submit_request;

	/*
	 * Make sure no one is running the old callback before we proceed with
	 * cancelling requests and resetting the completion tracking. Otherwise
	 * we might submit a request to the hardware which never completes.
	 */
	synchronize_rcu();

	for_each_engine(engine, i915, id) {
		/* Mark all executing requests as skipped */
		engine->cancel_requests(engine);

		/*
		 * Only once we've force-cancelled all in-flight requests can we
		 * start to complete all requests.
		 */
		engine->submit_request = nop_complete_submit_request;
	}

	/*
	 * Make sure no request can slip through without getting completed by
	 * either this call here to intel_engine_init_global_seqno, or the one
	 * in nop_complete_submit_request.
	 */
	synchronize_rcu();

	for_each_engine(engine, i915, id) {
		unsigned long flags;

		/* Mark all pending requests as complete so that any concurrent
		 * (lockless) lookup doesn't try and wait upon the request as we
		 * reset it.
		 */
		spin_lock_irqsave(&engine->timeline->lock, flags);
		intel_engine_init_global_seqno(engine,
					       intel_engine_last_submit(engine));
		spin_unlock_irqrestore(&engine->timeline->lock, flags);
	}

	set_bit(I915_WEDGED, &i915->gpu_error.flags);
	wake_up_all(&i915->gpu_error.reset_queue);
}

bool i915_gem_unset_wedged(struct drm_i915_private *i915)
{
	struct i915_gem_timeline *tl;
	int i;

	lockdep_assert_held(&i915->drm.struct_mutex);
	if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
		return true;

	/* Before unwedging, make sure that all pending operations
	 * are flushed and errored out - we may have requests waiting upon
	 * third party fences. We marked all inflight requests as EIO, and
	 * every execbuf since returned EIO, for consistency we want all
	 * the currently pending requests to also be marked as EIO, which
	 * is done inside our nop_submit_request - and so we must wait.
	 *
	 * No more can be submitted until we reset the wedged bit.
	 */
	list_for_each_entry(tl, &i915->gt.timelines, link) {
		for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
			struct drm_i915_gem_request *rq;

			rq = i915_gem_active_peek(&tl->engine[i].last_request,
						  &i915->drm.struct_mutex);
			if (!rq)
				continue;

			/* We can't use our normal waiter as we want to
			 * avoid recursively trying to handle the current
			 * reset. The basic dma_fence_default_wait() installs
			 * a callback for dma_fence_signal(), which is
			 * triggered by our nop handler (indirectly, the
			 * callback enables the signaler thread which is
			 * woken by the nop_submit_request() advancing the seqno
			 * and when the seqno passes the fence, the signaler
			 * then signals the fence waking us up).
			 */
			if (dma_fence_default_wait(&rq->fence, true,
						   MAX_SCHEDULE_TIMEOUT) < 0)
				return false;
		}
	}

	/* Undo nop_submit_request. We prevent all new i915 requests from
	 * being queued (by disallowing execbuf whilst wedged) so having
	 * waited for all active requests above, we know the system is idle
	 * and do not have to worry about a thread being inside
	 * engine->submit_request() as we swap over. So unlike installing
	 * the nop_submit_request on reset, we can do this from normal
	 * context and do not require stop_machine().
	 */
	intel_engines_reset_default_submission(i915);
	i915_gem_contexts_lost(i915);

	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
	clear_bit(I915_WEDGED, &i915->gpu_error.flags);

	return true;
}

static void
i915_gem_retire_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), gt.retire_work.work);
	struct drm_device *dev = &dev_priv->drm;

	/* Come back later if the device is busy... */
	if (mutex_trylock(&dev->struct_mutex)) {
		i915_gem_retire_requests(dev_priv);
		mutex_unlock(&dev->struct_mutex);
	}

	/* Keep the retire handler running until we are finally idle.
	 * We do not need to do this test under locking as in the worst-case
	 * we queue the retire worker once too often.
	 */
	if (READ_ONCE(dev_priv->gt.awake)) {
		i915_queue_hangcheck(dev_priv);
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->gt.retire_work,
				   round_jiffies_up_relative(HZ));
	}
}

static void
i915_gem_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), gt.idle_work.work);
	struct drm_device *dev = &dev_priv->drm;
	bool rearm_hangcheck;

	if (!READ_ONCE(dev_priv->gt.awake))
		return;

	/*
	 * Wait for last execlists context complete, but bail out in case a
	 * new request is submitted.
	 */
	wait_for(intel_engines_are_idle(dev_priv), 10);
	if (READ_ONCE(dev_priv->gt.active_requests))
		return;

	rearm_hangcheck =
		cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);

	if (!mutex_trylock(&dev->struct_mutex)) {
		/* Currently busy, come back later */
		mod_delayed_work(dev_priv->wq,
				 &dev_priv->gt.idle_work,
				 msecs_to_jiffies(50));
		goto out_rearm;
	}

	/*
	 * New request retired after this work handler started, extend active
	 * period until next instance of the work.
	 */
	if (work_pending(work))
		goto out_unlock;

	if (dev_priv->gt.active_requests)
		goto out_unlock;

	if (wait_for(intel_engines_are_idle(dev_priv), 10))
		DRM_ERROR("Timeout waiting for engines to idle\n");

	intel_engines_mark_idle(dev_priv);
	i915_gem_timelines_mark_idle(dev_priv);

	GEM_BUG_ON(!dev_priv->gt.awake);
	dev_priv->gt.awake = false;
	rearm_hangcheck = false;

	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_idle(dev_priv);
	intel_runtime_pm_put(dev_priv);
out_unlock:
	mutex_unlock(&dev->struct_mutex);

out_rearm:
	if (rearm_hangcheck) {
		GEM_BUG_ON(!dev_priv->gt.awake);
		i915_queue_hangcheck(dev_priv);
	}
}

void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(gem->dev);
	struct drm_i915_gem_object *obj = to_intel_bo(gem);
	struct drm_i915_file_private *fpriv = file->driver_priv;
	struct i915_lut_handle *lut, *ln;

	mutex_lock(&i915->drm.struct_mutex);

	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;
		struct i915_vma *vma;

		GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
		if (ctx->file_priv != fpriv)
			continue;

		vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
		GEM_BUG_ON(vma->obj != obj);

		/* We allow the process to have multiple handles to the same
		 * vma, in the same fd namespace, by virtue of flink/open.
		 */
		GEM_BUG_ON(!vma->open_count);
		if (!--vma->open_count && !i915_vma_is_ggtt(vma))
			i915_vma_close(vma);

		list_del(&lut->obj_link);
		list_del(&lut->ctx_link);

		kmem_cache_free(i915->luts, lut);
		__i915_gem_object_release_unless_active(obj);
	}

	mutex_unlock(&i915->drm.struct_mutex);
}

static unsigned long to_wait_timeout(s64 timeout_ns)
{
	if (timeout_ns < 0)
		return MAX_SCHEDULE_TIMEOUT;

	if (timeout_ns == 0)
		return 0;

	return nsecs_to_jiffies_timeout(timeout_ns);
}

/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * Returns 0 if successful, else an error is returned with the remaining time in
 * the timeout parameter.
 *  -ETIME: object is still busy after timeout
 *  -ERESTARTSYS: signal interrupted the wait
 *  -ENONENT: object doesn't exist
 * Also possible, but rare:
 *  -EAGAIN: incomplete, restart syscall
 *  -ENOMEM: damn
 *  -ENODEV: Internal IRQ fail
 *  -E?: The add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 * without holding struct_mutex the object may become re-busied before this
 * function completes. A similar but shorter race condition exists in the busy
 * ioctl
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
3402 3403
	ktime_t start;
	long ret;
3404

3405 3406 3407
	if (args->flags != 0)
		return -EINVAL;

3408
	obj = i915_gem_object_lookup(file, args->bo_handle);
3409
	if (!obj)
3410 3411
		return -ENOENT;

3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422
	start = ktime_get();

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
				   to_wait_timeout(args->timeout_ns),
				   to_rps_client(file));

	if (args->timeout_ns > 0) {
		args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
		if (args->timeout_ns < 0)
			args->timeout_ns = 0;
3423 3424 3425 3426 3427 3428 3429 3430 3431 3432

		/*
		 * Apparently ktime isn't accurate enough and occasionally has a
		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
		 * things up to make the test happy. We allow up to 1 jiffy.
		 *
		 * This is a regression from the timespec->ktime conversion.
		 */
		if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
			args->timeout_ns = 0;

		/* Asked to wait beyond the jiffie/scheduler precision? */
		if (ret == -ETIME && args->timeout_ns)
			ret = -EAGAIN;
	}

	i915_gem_object_put(obj);
	return ret;
}

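/*
 * Wait, with the behaviour requested in @flags, for the last request
 * submitted on each engine of the given timeline to complete.
 */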
static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
		ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
		if (ret)
			return ret;
	}

	return 0;
}

static int wait_for_engines(struct drm_i915_private *i915)
{
	if (wait_for(intel_engines_are_idle(i915), 50)) {
		DRM_ERROR("Failed to idle engines, declaring wedged!\n");
		i915_gem_set_wedged(i915);
		return -EIO;
	}

	return 0;
}

int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
{
	int ret;

	/* If the device is asleep, we have no requests outstanding */
	if (!READ_ONCE(i915->gt.awake))
		return 0;

	if (flags & I915_WAIT_LOCKED) {
		struct i915_gem_timeline *tl;

		lockdep_assert_held(&i915->drm.struct_mutex);

		list_for_each_entry(tl, &i915->gt.timelines, link) {
			ret = wait_for_timeline(tl, flags);
			if (ret)
				return ret;
		}

		i915_gem_retire_requests(i915);
		GEM_BUG_ON(i915->gt.active_requests);

		ret = wait_for_engines(i915);
	} else {
		ret = wait_for_timeline(&i915->gt.global_timeline, flags);
	}

	return ret;
}

static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
{
	/*
	 * We manually flush the CPU domain so that we can override and
	 * force the flush for the display, and perform it asynchronously.
	 */
	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
	if (obj->cache_dirty)
		i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
	obj->base.write_domain = 0;
}

void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
{
	if (!READ_ONCE(obj->pin_global))
		return;

	mutex_lock(&obj->base.dev->struct_mutex);
	__i915_gem_object_flush_for_display(obj);
	mutex_unlock(&obj->base.dev->struct_mutex);
}

/**
 * Moves a single object to the WC read, and possibly write domain.
 * @obj: object to act on
 * @write: ask for write access or read only
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   (write ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_WC)
		return 0;

	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);

	/* Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * WC domain upon first access.
	 */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_WC) == 0)
		mb();

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_WC) != 0);
	obj->base.read_domains |= I915_GEM_DOMAIN_WC;
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_WC;
		obj->base.write_domain = I915_GEM_DOMAIN_WC;
		obj->mm.dirty = true;
	}

	i915_gem_object_unpin_pages(obj);
	return 0;
}

/**
 * Moves a single object to the GTT read, and possibly write domain.
 * @obj: object to act on
 * @write: ask for write access or read only
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   (write ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
		return 0;

	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);

	/* Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * GTT domain upon first access.
	 */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		mb();

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
		obj->mm.dirty = true;
	}

	i915_gem_object_unpin_pages(obj);
	return 0;
}

/**
 * Changes the cache-level of an object across all VMA.
 * @obj: object to act on
 * @cache_level: new cache level to set for the object
 *
 * After this function returns, the object will be in the new cache-level
 * across all GTT and the contents of the backing storage will be coherent,
 * with respect to the new cache-level. In order to keep the backing storage
 * coherent for all users, we only allow a single cache level to be set
 * globally on the object and prevent it from being changed whilst the
 * hardware is reading from the object. That is if the object is currently
 * on the scanout it will be set to uncached (or equivalent display
 * cache coherency) and all non-MOCS GPU access will also be uncached so
 * that all direct access to the scanout remains coherent.
 */
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level)
{
	struct i915_vma *vma;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (obj->cache_level == cache_level)
		return 0;

	/* Inspect the list of currently bound VMA and unbind any that would
	 * be invalid given the new cache-level. This is principally to
	 * catch the issue of the CS prefetch crossing page boundaries and
	 * reading an invalid PTE on older architectures.
	 */
restart:
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_pinned(vma)) {
			DRM_DEBUG("can not change the cache level of pinned objects\n");
			return -EBUSY;
		}

		if (i915_gem_valid_gtt_space(vma, cache_level))
			continue;

		ret = i915_vma_unbind(vma);
		if (ret)
			return ret;

		/* As unbinding may affect other elements in the
		 * obj->vma_list (due to side-effects from retiring
		 * an active vma), play safe and restart the iterator.
		 */
		goto restart;
	}

	/* We can reuse the existing drm_mm nodes but need to change the
	 * cache-level on the PTE. We could simply unbind them all and
	 * rebind with the correct cache-level on next use. However since
	 * we already have a valid slot, dma mapping, pages etc, we may as
	 * rewrite the PTE in the belief that doing so tramples upon less
	 * state and so involves less work.
	 */
	if (obj->bind_count) {
		/* Before we change the PTE, the GPU must not be accessing it.
		 * If we wait upon the object, we know that all the bound
		 * VMA are no longer active.
		 */
		ret = i915_gem_object_wait(obj,
					   I915_WAIT_INTERRUPTIBLE |
					   I915_WAIT_LOCKED |
					   I915_WAIT_ALL,
					   MAX_SCHEDULE_TIMEOUT,
					   NULL);
		if (ret)
			return ret;

		if (!HAS_LLC(to_i915(obj->base.dev)) &&
		    cache_level != I915_CACHE_NONE) {
			/* Access to snoopable pages through the GTT is
			 * incoherent and on some machines causes a hard
			 * lockup. Relinquish the CPU mmapping to force
			 * userspace to refault in the pages and we can
			 * then double check if the GTT mapping is still
			 * valid for that pointer access.
			 */
			i915_gem_release_mmap(obj);

			/* As we no longer need a fence for GTT access,
			 * we can relinquish it now (and so prevent having
			 * to steal a fence from someone else on the next
			 * fence request). Note GPU activity would have
			 * dropped the fence as all snoopable access is
			 * supposed to be linear.
			 */
			list_for_each_entry(vma, &obj->vma_list, obj_link) {
				ret = i915_vma_put_fence(vma);
				if (ret)
					return ret;
			}
		} else {
			/* We either have incoherent backing store and
			 * so no GTT access or the architecture is fully
			 * coherent. In such cases, existing GTT mmaps
			 * ignore the cache bit in the PTE and we can
			 * rewrite it without confusing the GPU or having
			 * to force userspace to fault back in its mmaps.
			 */
		}

		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
			if (ret)
				return ret;
		}
	}

	list_for_each_entry(vma, &obj->vma_list, obj_link)
		vma->node.color = cache_level;
	i915_gem_object_set_cache_coherency(obj, cache_level);
	obj->cache_dirty = true; /* Always invalidate stale cachelines */

	return 0;
}

int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	int err = 0;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, args->handle);
	if (!obj) {
		err = -ENOENT;
		goto out;
	}
	switch (obj->cache_level) {
	case I915_CACHE_LLC:
	case I915_CACHE_L3_LLC:
		args->caching = I915_CACHING_CACHED;
		break;

	case I915_CACHE_WT:
		args->caching = I915_CACHING_DISPLAY;
		break;

	default:
		args->caching = I915_CACHING_NONE;
		break;
	}
out:
	rcu_read_unlock();
	return err;
}

int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret = 0;

	switch (args->caching) {
	case I915_CACHING_NONE:
		level = I915_CACHE_NONE;
		break;
	case I915_CACHING_CACHED:
		/*
		 * Due to a HW issue on BXT A stepping, GPU stores via a
		 * snooped mapping may leave stale data in a corresponding CPU
		 * cacheline, whereas normally such cachelines would get
		 * invalidated.
		 */
		if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
			return -ENODEV;

		level = I915_CACHE_LLC;
		break;
	case I915_CACHING_DISPLAY:
		level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
		break;
	default:
		return -EINVAL;
	}

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	if (obj->cache_level == level)
		goto out;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
	if (ret)
		goto out;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	ret = i915_gem_object_set_cache_level(obj, level);
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_object_put(obj);
	return ret;
}

3863
/*
3864 3865 3866
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
3867
 */
struct i915_vma *
3869 3870
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
3871
				     const struct i915_ggtt_view *view)
3872
{
	struct i915_vma *vma;
3874 3875
	int ret;

3876 3877
	lockdep_assert_held(&obj->base.dev->struct_mutex);

3878
	/* Mark the global pin early so that we account for the
3879 3880
	 * display coherency whilst setting up the cache domains.
	 */
3881
	obj->pin_global++;
3882

3883 3884 3885 3886 3887 3888 3889 3890 3891
	/* The display engine is not coherent with the LLC cache on gen6.  As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is lowest common denominator for all
	 * chipsets.
	 *
	 * However for gen6+, we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
3892
	ret = i915_gem_object_set_cache_level(obj,
3893 3894
					      HAS_WT(to_i915(obj->base.dev)) ?
					      I915_CACHE_WT : I915_CACHE_NONE);
	if (ret) {
		vma = ERR_PTR(ret);
3897
		goto err_unpin_global;
	}
3899

3900 3901
	/* As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
3902 3903 3904 3905
	 * always use map_and_fenceable for all scanout buffers. However,
	 * it may simply be too big to fit into mappable, in which case
	 * put it anyway and hope that userspace can cope (but always first
	 * try to preserve the existing ABI).
3906
	 */
3907
	vma = ERR_PTR(-ENOSPC);
3908
	if (!view || view->type == I915_GGTT_VIEW_NORMAL)
3909 3910
		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
					       PIN_MAPPABLE | PIN_NONBLOCK);
3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926
	if (IS_ERR(vma)) {
		struct drm_i915_private *i915 = to_i915(obj->base.dev);
		unsigned int flags;

		/* Valleyview is definitely limited to scanning out the first
		 * 512MiB. Lets presume this behaviour was inherited from the
		 * g4x display engine and that all earlier gen are similarly
		 * limited. Testing suggests that it is a little more
		 * complicated than this. For example, Cherryview appears quite
		 * happy to scanout from anywhere within its global aperture.
		 */
		flags = 0;
		if (HAS_GMCH_DISPLAY(i915))
			flags = PIN_MAPPABLE;
		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
	}
	if (IS_ERR(vma))
3928
		goto err_unpin_global;
3929

3930 3931
	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);

3932
	/* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
3933
	__i915_gem_object_flush_for_display(obj);
3934
	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
3935

3936 3937 3938
	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
3939
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3940

	return vma;
3942

3943 3944
err_unpin_global:
	obj->pin_global--;
	return vma;
3946 3947 3948
}

void
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
3950
{
3951
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
3952

3953
	if (WARN_ON(vma->obj->pin_global == 0))
3954 3955
		return;

3956
	if (--vma->obj->pin_global == 0)
3957
		vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
3958

3959
	/* Bump the LRU to try and avoid premature eviction whilst flipping  */
3960
	i915_gem_object_bump_inactive_ggtt(vma->obj);
3961

	i915_vma_unpin(vma);
3963 3964
}

3965 3966
/**
 * Moves a single object to the CPU read, and possibly write domain.
3967 3968
 * @obj: object to act on
 * @write: requesting write or read-only access
3969 3970 3971 3972
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
3973
int
3974
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3975 3976 3977
{
	int ret;

3978
	lockdep_assert_held(&obj->base.dev->struct_mutex);
3979

3980 3981 3982 3983 3984 3985
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   (write ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
3986 3987 3988
	if (ret)
		return ret;

3989
	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
3990

3991
	/* Flush the CPU cache if it's still invalid. */
3992
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3993
		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
3994
		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3995 3996 3997 3998 3999
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
4000
	GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
4001 4002 4003 4004

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
4005 4006
	if (write)
		__start_cpu_write(obj);
4007 4008 4009 4010

	return 0;
}

4011 4012 4013
/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
4014 4015 4016 4017
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
4018 4019 4020
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
4021
static int
4022
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
4023
{
4024
	struct drm_i915_private *dev_priv = to_i915(dev);
4025
	struct drm_i915_file_private *file_priv = file->driver_priv;
4026
	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
4027
	struct drm_i915_gem_request *request, *target = NULL;
4028
	long ret;
4029

4030 4031 4032
	/* ABI: return -EIO if already wedged */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return -EIO;
4033

4034
	spin_lock(&file_priv->mm.lock);
4035
	list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
4036 4037
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;
4038

4039 4040 4041 4042
		if (target) {
			list_del(&target->client_link);
			target->file_priv = NULL;
		}
4043

4044
		target = request;
4045
	}
4046
	if (target)
4047
		i915_gem_request_get(target);
4048
	spin_unlock(&file_priv->mm.lock);
4049

4050
	if (target == NULL)
4051
		return 0;
4052

4053 4054 4055
	ret = i915_wait_request(target,
				I915_WAIT_INTERRUPTIBLE,
				MAX_SCHEDULE_TIMEOUT);
4056
	i915_gem_request_put(target);
4057

4058
	return ret < 0 ? ret : 0;
4059 4060
}

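/*
 * Pin an object into the global GTT for the given view, first unbinding
 * any existing VMA that does not satisfy the requested size, alignment or
 * placement constraints.
 */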
struct i915_vma *
4062 4063
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
4064
			 u64 size,
4065 4066
			 u64 alignment,
			 u64 flags)
4067
{
4068 4069
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_address_space *vm = &dev_priv->ggtt.base;
4070 4071
	struct i915_vma *vma;
	int ret;
4072

4073 4074
	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (!view && flags & PIN_MAPPABLE) {
		/* If the required space is larger than the available
		 * aperture, we will not be able to find a slot for the
		 * object and unbinding the object now will be in
		 * vain. Worse, doing so may cause us to ping-pong
		 * the object in and out of the Global GTT and
		 * waste a lot of cycles under the mutex.
		 */
		if (obj->base.size > dev_priv->ggtt.mappable_end)
			return ERR_PTR(-E2BIG);

		/* If NONBLOCK is set the caller is optimistically
		 * trying to cache the full object within the mappable
		 * aperture, and *must* have a fallback in place for
		 * situations where we cannot bind the object. We
		 * can be a little more lax here and use the fallback
		 * more often to avoid costly migrations of ourselves
		 * and other objects within the aperture.
		 *
		 * Half-the-aperture is used as a simple heuristic.
		 * More interesting would be to do a search for a free
		 * block prior to making the commitment to unbind.
		 * That caters for the self-harm case, and with a
		 * little more heuristics (e.g. NOFAULT, NOEVICT)
		 * we could try to minimise harm to others.
		 */
		if (flags & PIN_NONBLOCK &&
		    obj->base.size > dev_priv->ggtt.mappable_end / 2)
			return ERR_PTR(-ENOSPC);
	}

4106
	vma = i915_vma_instance(obj, vm, view);
4107
	if (unlikely(IS_ERR(vma)))
		return vma;
4109 4110

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
4111 4112 4113
		if (flags & PIN_NONBLOCK) {
			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
				return ERR_PTR(-ENOSPC);
4114

4115
			if (flags & PIN_MAPPABLE &&
4116
			    vma->fence_size > dev_priv->ggtt.mappable_end / 2)
4117 4118 4119
				return ERR_PTR(-ENOSPC);
		}

4120 4121
		WARN(i915_vma_is_pinned(vma),
		     "bo is already pinned in ggtt with incorrect alignment:"
4122 4123 4124
		     " offset=%08x, req.alignment=%llx,"
		     " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
		     i915_ggtt_offset(vma), alignment,
4125
		     !!(flags & PIN_MAPPABLE),
4126
		     i915_vma_is_map_and_fenceable(vma));
4127 4128
		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
4130 4131
	}

	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);
4135

	return vma;
4137 4138
}

4139
static __always_inline unsigned int __busy_read_flag(unsigned int id)
{
	/* Note that we could alias engines in the execbuf API, but
	 * that would be very unwise as it prevents userspace from
	 * fine control over engine selection. Ahem.
	 *
	 * This should be something like EXEC_MAX_ENGINE instead of
	 * I915_NUM_ENGINES.
	 */
	BUILD_BUG_ON(I915_NUM_ENGINES > 16);
	return 0x10000 << id;
}

static __always_inline unsigned int __busy_write_id(unsigned int id)
{
4154 4155 4156 4157 4158 4159 4160 4161 4162
	/* The uABI guarantees an active writer is also amongst the read
	 * engines. This would be true if we accessed the activity tracking
	 * under the lock, but as we perform the lookup of the object and
	 * its activity locklessly we can not guarantee that the last_write
	 * being active implies that we have set the same engine flag from
	 * last_read - hence we always set both read and write busy for
	 * last_write.
	 */
	return id | __busy_read_flag(id);
4163 4164
}

4165
static __always_inline unsigned int
4166
__busy_set_if_active(const struct dma_fence *fence,
4167 4168
		     unsigned int (*flag)(unsigned int id))
{
4169
	struct drm_i915_gem_request *rq;
4170

4171 4172 4173 4174
	/* We have to check the current hw status of the fence as the uABI
	 * guarantees forward progress. We could rely on the idle worker
	 * to eventually flush us, but to minimise latency just ask the
	 * hardware.
4175
	 *
4176
	 * Note we only report on the status of native fences.
4177
	 */
4178 4179 4180 4181 4182 4183 4184 4185
	if (!dma_fence_is_i915(fence))
		return 0;

	/* opencode to_request() in order to avoid const warnings */
	rq = container_of(fence, struct drm_i915_gem_request, fence);
	if (i915_gem_request_completed(rq))
		return 0;

4186
	return flag(rq->engine->uabi_id);
4187 4188
}

4189
static __always_inline unsigned int
4190
busy_check_reader(const struct dma_fence *fence)
4191
{
4192
	return __busy_set_if_active(fence, __busy_read_flag);
4193 4194
}

4195
static __always_inline unsigned int
4196
busy_check_writer(const struct dma_fence *fence)
4197
{
4198 4199 4200 4201
	if (!fence)
		return 0;

	return __busy_set_if_active(fence, __busy_write_id);
4202 4203
}

4204 4205
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4206
		    struct drm_file *file)
4207 4208
{
	struct drm_i915_gem_busy *args = data;
4209
	struct drm_i915_gem_object *obj;
4210 4211
	struct reservation_object_list *list;
	unsigned int seq;
4212
	int err;
4213

4214
	err = -ENOENT;
4215 4216
	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, args->handle);
4217
	if (!obj)
4218
		goto out;
4219

	/* A discrepancy here is that we do not report the status of
	 * non-i915 fences, i.e. even though we may report the object as idle,
	 * a call to set-domain may still stall waiting for foreign rendering.
	 * This also means that wait-ioctl may report an object as busy,
	 * where busy-ioctl considers it idle.
	 *
	 * We trade the ability to warn of foreign fences to report on which
	 * i915 engines are active for the object.
	 *
	 * Alternatively, we can trade that extra information on read/write
	 * activity with
	 *	args->busy =
	 *		!reservation_object_test_signaled_rcu(obj->resv, true);
	 * to report the overall busyness. This is what the wait-ioctl does.
	 *
	 */
retry:
	seq = raw_read_seqcount(&obj->resv->seq);
4238

4239 4240
	/* Translate the exclusive fence to the READ *and* WRITE engine */
	args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
4241

4242 4243 4244 4245
	/* Translate shared fences to READ set of engines */
	list = rcu_dereference(obj->resv->fence);
	if (list) {
		unsigned int shared_count = list->shared_count, i;
4246

4247 4248 4249 4250 4251 4252
		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence =
				rcu_dereference(list->shared[i]);

			args->busy |= busy_check_reader(fence);
		}
4253
	}
4254

4255 4256 4257 4258
	if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
		goto retry;

	err = 0;
4259 4260 4261
out:
	rcu_read_unlock();
	return err;
4262 4263 4264 4265 4266 4267
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
4268
	return i915_gem_ring_throttle(dev, file_priv);
4269 4270
}

4271 4272 4273 4274
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
4275
	struct drm_i915_private *dev_priv = to_i915(dev);
4276
	struct drm_i915_gem_madvise *args = data;
4277
	struct drm_i915_gem_object *obj;
4278
	int err;
4279 4280 4281 4282 4283 4284 4285 4286 4287

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
	    break;
	default:
	    return -EINVAL;
	}

4288
	obj = i915_gem_object_lookup(file_priv, args->handle);
4289 4290 4291 4292 4293 4294
	if (!obj)
		return -ENOENT;

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		goto out;
4295

4296
	if (i915_gem_object_has_pages(obj) &&
4297
	    i915_gem_object_is_tiled(obj) &&
4298
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
4299 4300
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!obj->mm.quirked);
			__i915_gem_object_unpin_pages(obj);
4302 4303 4304
			obj->mm.quirked = false;
		}
		if (args->madv == I915_MADV_WILLNEED) {
4305
			GEM_BUG_ON(obj->mm.quirked);
			__i915_gem_object_pin_pages(obj);
4307 4308
			obj->mm.quirked = true;
		}
4309 4310
	}

	if (obj->mm.madv != __I915_MADV_PURGED)
		obj->mm.madv = args->madv;
4313

	/* if the object is no longer attached, discard its backing storage */
4315 4316
	if (obj->mm.madv == I915_MADV_DONTNEED &&
	    !i915_gem_object_has_pages(obj))
4317 4318
		i915_gem_object_truncate(obj);

	args->retained = obj->mm.madv != __I915_MADV_PURGED;
4320
	mutex_unlock(&obj->mm.lock);

4322
out:
4323
	i915_gem_object_put(obj);
4324
	return err;
4325 4326
}

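/*
 * Retirement callback for the last GPU write to a frontbuffer object:
 * notify frontbuffer tracking that rendering via the command streamer
 * has been flushed.
 */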
static void
frontbuffer_retire(struct i915_gem_active *active,
		   struct drm_i915_gem_request *request)
{
	struct drm_i915_gem_object *obj =
		container_of(active, typeof(*obj), frontbuffer_write);

4334
	intel_fb_obj_flush(obj, ORIGIN_CS);
4335 4336
}

4337 4338
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
4339
{
4340 4341
	mutex_init(&obj->mm.lock);

	INIT_LIST_HEAD(&obj->vma_list);
4343
	INIT_LIST_HEAD(&obj->lut_list);
4344
	INIT_LIST_HEAD(&obj->batch_pool_link);
4345

4346 4347
	obj->ops = ops;

4348 4349 4350
	reservation_object_init(&obj->__builtin_resv);
	obj->resv = &obj->__builtin_resv;

4351
	obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
4352
	init_request_active(&obj->frontbuffer_write, frontbuffer_retire);

	obj->mm.madv = I915_MADV_WILLNEED;
	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_page.lock);
4357

4358
	i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
4359 4360
}

4361
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4362 4363
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
4364

4365 4366
	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,
4367 4368

	.pwrite = i915_gem_object_pwrite_gtt,
4369 4370
};

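/*
 * Set up the shmemfs backing store for a GEM object, preferring the
 * driver's private gemfs mount (when available) over the default tmpfs.
 */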
static int i915_gem_object_create_shmem(struct drm_device *dev,
					struct drm_gem_object *obj,
					size_t size)
{
	struct drm_i915_private *i915 = to_i915(dev);
	unsigned long flags = VM_NORESERVE;
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	if (i915->mm.gemfs)
		filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
						 flags);
	else
		filp = shmem_file_setup("i915", size, flags);

	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}

4395
struct drm_i915_gem_object *
4396
i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
4397
{
4398
	struct drm_i915_gem_object *obj;
4399
	struct address_space *mapping;
4400
	unsigned int cache_level;
	gfp_t mask;
4402
	int ret;
4403

4404 4405 4406 4407 4408
	/* There is a prevalence of the assumption that we fit the object's
	 * page count inside a 32bit _signed_ variable. Let's document this and
	 * catch if we ever need to fix it. In the meantime, if you do spot
	 * such a local variable, please consider fixing!
	 */
4409
	if (size >> PAGE_SHIFT > INT_MAX)
4410 4411 4412 4413 4414
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

4415
	obj = i915_gem_object_alloc(dev_priv);
4416
	if (obj == NULL)
4417
		return ERR_PTR(-ENOMEM);
4418

	ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size);
4420 4421
	if (ret)
		goto fail;
4422

4423
	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4424
	if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
4425 4426 4427 4428 4429
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

4430
	mapping = obj->base.filp->f_mapping;
4431
	mapping_set_gfp_mask(mapping, mask);
4432
	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
4433

4434
	i915_gem_object_init(obj, &i915_gem_object_ops);
4435

4436 4437
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4438

4439
	if (HAS_LLC(dev_priv))
4440
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached.  Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache.  This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
4452 4453 4454
		cache_level = I915_CACHE_LLC;
	else
		cache_level = I915_CACHE_NONE;
4455

4456
	i915_gem_object_set_cache_coherency(obj, cache_level);
4457

4458 4459
	trace_i915_gem_object_create(obj);

4460
	return obj;
4461 4462 4463 4464

fail:
	i915_gem_object_free(obj);
	return ERR_PTR(ret);
4465 4466
}

4467 4468 4469 4470 4471 4472 4473 4474
static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
	/* If we are the last user of the backing storage (be it shmemfs
	 * pages or stolen etc), we know that the pages are going to be
	 * immediately released. In this case, we can then skip copying
	 * back the contents from the GPU.
	 */

	if (obj->mm.madv != I915_MADV_WILLNEED)
4476 4477 4478 4479 4480 4481 4482 4483 4484 4485 4486 4487 4488 4489 4490
		return false;

	if (obj->base.filp == NULL)
		return true;

	/* At first glance, this looks racy, but then again so would be
	 * userspace racing mmap against close. However, the first external
	 * reference to the filp can only be obtained through the
	 * i915_gem_mmap_ioctl() which safeguards us against the user
	 * acquiring such a reference whilst we are in the middle of
	 * freeing the object.
	 */
	return atomic_long_read(&obj->base.filp->f_count) == 1;
}

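/*
 * Release a batch of objects queued for deferred freeing: close their VMA
 * under struct_mutex, drop their pages and backing storage, and finally
 * free the objects themselves.
 */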
static void __i915_gem_free_objects(struct drm_i915_private *i915,
				    struct llist_node *freed)
4493
{
4494
	struct drm_i915_gem_object *obj, *on;
4495

4496
	intel_runtime_pm_get(i915);
4497
	llist_for_each_entry_safe(obj, on, freed, freed) {
4498 4499 4500 4501
		struct i915_vma *vma, *vn;

		trace_i915_gem_object_destroy(obj);

4502 4503
		mutex_lock(&i915->drm.struct_mutex);

4504 4505 4506 4507 4508 4509 4510
		GEM_BUG_ON(i915_gem_object_is_active(obj));
		list_for_each_entry_safe(vma, vn,
					 &obj->vma_list, obj_link) {
			GEM_BUG_ON(i915_vma_is_active(vma));
			vma->flags &= ~I915_VMA_PIN_MASK;
			i915_vma_close(vma);
		}
4511 4512
		GEM_BUG_ON(!list_empty(&obj->vma_list));
		GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
4513

		/* This serializes freeing with the shrinker. Since the free
		 * is delayed, first by RCU then by the workqueue, we want the
		 * shrinker to be able to free pages of unreferenced objects,
		 * or else we may oom whilst there are plenty of deferred
		 * freed objects.
		 */
		if (i915_gem_object_has_pages(obj)) {
			spin_lock(&i915->mm.obj_lock);
			list_del_init(&obj->mm.link);
			spin_unlock(&i915->mm.obj_lock);
		}

4526
		mutex_unlock(&i915->drm.struct_mutex);
4527 4528

		GEM_BUG_ON(obj->bind_count);
4529
		GEM_BUG_ON(obj->userfault_count);
4530
		GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
4531
		GEM_BUG_ON(!list_empty(&obj->lut_list));
4532 4533 4534

		if (obj->ops->release)
			obj->ops->release(obj);
4535

4536 4537
		if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
			atomic_set(&obj->mm.pages_pin_count, 0);
4538
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
4539
		GEM_BUG_ON(i915_gem_object_has_pages(obj));
4540 4541 4542 4543

		if (obj->base.import_attach)
			drm_prime_gem_destroy(&obj->base, NULL);

4544
		reservation_object_fini(&obj->__builtin_resv);
4545 4546 4547 4548 4549
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(i915, obj->base.size);

		kfree(obj->bit_17);
		i915_gem_object_free(obj);
4550 4551 4552

		if (on)
			cond_resched();
4553
	}
4554
	intel_runtime_pm_put(i915);
4555 4556 4557 4558 4559 4560
}

static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
	struct llist_node *freed;

	/* Free the oldest, most stale object to keep the free_list short */
	freed = NULL;
	if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
		/* Only one consumer of llist_del_first() allowed */
		spin_lock(&i915->mm.free_lock);
		freed = llist_del_first(&i915->mm.free_list);
		spin_unlock(&i915->mm.free_lock);
	}
	if (unlikely(freed)) {
		freed->next = NULL;
4571
		__i915_gem_free_objects(i915, freed);
4572
	}
4573 4574 4575 4576 4577 4578 4579
}

static void __i915_gem_free_work(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, struct drm_i915_private, mm.free_work);
	struct llist_node *freed;
4580

4581 4582 4583 4584 4585 4586 4587
	/* All file-owned VMA should have been released by this point through
	 * i915_gem_close_object(), or earlier by i915_gem_context_close().
	 * However, the object may also be bound into the global GTT (e.g.
	 * older GPUs without per-process support, or for direct access through
	 * the GTT either for the user or for scanout). Those VMA still need to
	 * unbound now.
	 */
4588

4589
	while ((freed = llist_del_all(&i915->mm.free_list))) {
4590
		__i915_gem_free_objects(i915, freed);
4591 4592 4593
		if (need_resched())
			break;
	}
4594
}
4595

4596 4597 4598 4599 4600 4601 4602 4603 4604 4605 4606 4607 4608 4609
static void __i915_gem_free_object_rcu(struct rcu_head *head)
{
	struct drm_i915_gem_object *obj =
		container_of(head, typeof(*obj), rcu);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/* We can't simply use call_rcu() from i915_gem_free_object()
	 * as we need to block whilst unbinding, and the call_rcu
	 * task may be called from softirq context. So we take a
	 * detour through a worker.
	 */
	if (llist_add(&obj->freed, &i915->mm.free_list))
		schedule_work(&i915->mm.free_work);
}
4610

4611 4612 4613
void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

4615 4616 4617
	if (obj->mm.quirked)
		__i915_gem_object_unpin_pages(obj);

4618
	if (discard_backing_storage(obj))
		obj->mm.madv = I915_MADV_DONTNEED;
4620

4621 4622 4623 4624 4625 4626
	/* Before we free the object, make sure any pure RCU-only
	 * read-side critical sections are complete, e.g.
	 * i915_gem_busy_ioctl(). For the corresponding synchronized
	 * lookup see i915_gem_object_lookup_rcu().
	 */
	call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
4627 4628
}

4629 4630 4631 4632
void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);

4633 4634
	if (!i915_gem_object_has_active_reference(obj) &&
	    i915_gem_object_is_active(obj))
4635 4636 4637 4638 4639
		i915_gem_object_set_active_reference(obj);
	else
		i915_gem_object_put(obj);
}

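/* Check that every engine last retired into the kernel context. */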
static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
4646 4647
		GEM_BUG_ON(engine->last_retired_context &&
			   !i915_gem_context_is_kernel(engine->last_retired_context));
4648 4649
}

4650 4651
void i915_gem_sanitize(struct drm_i915_private *i915)
{
4652 4653 4654 4655 4656 4657
	if (i915_terminally_wedged(&i915->gpu_error)) {
		mutex_lock(&i915->drm.struct_mutex);
		i915_gem_unset_wedged(i915);
		mutex_unlock(&i915->drm.struct_mutex);
	}

4658 4659 4660 4661 4662 4663
	/*
	 * If we inherit context state from the BIOS or earlier occupants
	 * of the GPU, the GPU may be in an inconsistent state when we
	 * try to take over. The only way to remove the earlier state
	 * is by resetting. However, resetting on earlier gen is tricky as
	 * it may impact the display and we are uncertain about the stability
4664
	 * of the reset, so this could be applied to even earlier gen.
4665
	 */
4666
	if (INTEL_GEN(i915) >= 5) {
4667 4668 4669 4670 4671
		int reset = intel_gpu_reset(i915, ALL_ENGINES);
		WARN_ON(reset && reset != -ENODEV);
	}
}

4672
int i915_gem_suspend(struct drm_i915_private *dev_priv)
4673
{
4674
	struct drm_device *dev = &dev_priv->drm;
4675
	int ret;
4676

4677
	intel_runtime_pm_get(dev_priv);
4678 4679
	intel_suspend_gt_powersave(dev_priv);

4680
	mutex_lock(&dev->struct_mutex);

	/* We have to flush all the executing contexts to main memory so
	 * that they can saved in the hibernation image. To ensure the last
	 * context image is coherent, we have to switch away from it. That
	 * leaves the dev_priv->kernel_context still active when
	 * we actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	ret = i915_gem_switch_to_kernel_context(dev_priv);
	if (ret)
4692
		goto err_unlock;
4693

4694 4695 4696
	ret = i915_gem_wait_for_idle(dev_priv,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
4697
	if (ret && ret != -EIO)
4698
		goto err_unlock;
4699

4700
	assert_kernel_context_is_current(dev_priv);
4701
	i915_gem_contexts_lost(dev_priv);
4702 4703
	mutex_unlock(&dev->struct_mutex);

4704 4705
	intel_guc_suspend(dev_priv);

4706
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4707
	cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4708 4709 4710 4711

	/* As the idle_work is rearming if it detects a race, play safe and
	 * repeat the flush until it is definitely idle.
	 */
4712
	drain_delayed_work(&dev_priv->gt.idle_work);
4713

4714 4715 4716
	/* Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
4717
	WARN_ON(dev_priv->gt.awake);
4718 4719
	if (WARN_ON(!intel_engines_are_idle(dev_priv)))
		i915_gem_set_wedged(dev_priv); /* no hope, discard everything */
4720

	/*
	 * Neither the BIOS, ourselves or any other kernel
	 * expects the system to be in execlists mode on startup,
	 * so we need to reset the GPU back to legacy mode. And the only
	 * known way to disable logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload and suspend. Afterwards we then
	 * clean up the GEM state tracking, flushing off the requests and
	 * leaving the system in a known idle state.
	 *
	 * Note that is of the upmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */
4740
	i915_gem_sanitize(dev_priv);
4741 4742 4743

	intel_runtime_pm_put(dev_priv);
	return 0;
4744

4745
err_unlock:
4746
	mutex_unlock(&dev->struct_mutex);
4747
	intel_runtime_pm_put(dev_priv);
4748
	return ret;
4749 4750
}

4751
void i915_gem_resume(struct drm_i915_private *dev_priv)
4752
{
4753
	struct drm_device *dev = &dev_priv->drm;
4754

4755 4756
	WARN_ON(dev_priv->gt.awake);

4757
	mutex_lock(&dev->struct_mutex);
4758
	i915_gem_restore_gtt_mappings(dev_priv);
4759
	i915_gem_restore_fences(dev_priv);
4760 4761 4762 4763 4764

	/* As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
4765
	dev_priv->gt.resume(dev_priv);
4766 4767 4768 4769

	mutex_unlock(&dev->struct_mutex);
}

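/*
 * Enable the hardware swizzling controls (gen5+) so that tiled surface
 * accesses match the bit-6 swizzle mode detected for this machine.
 */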
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
4771
{
4772
	if (INTEL_GEN(dev_priv) < 5 ||
4773 4774 4775 4776 4777 4778
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

4779
	if (IS_GEN5(dev_priv))
4780 4781
		return;

4782
	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4783
	if (IS_GEN6(dev_priv))
4784
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4785
	else if (IS_GEN7(dev_priv))
4786
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4787
	else if (IS_GEN8(dev_priv))
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4789 4790
	else
		BUG();
4791
}

4793
static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
4794 4795 4796 4797 4798 4799 4800
{
	I915_WRITE(RING_CTL(base), 0);
	I915_WRITE(RING_HEAD(base), 0);
	I915_WRITE(RING_TAIL(base), 0);
	I915_WRITE(RING_START(base), 0);
}

4801
static void init_unused_rings(struct drm_i915_private *dev_priv)
4802
{
	if (IS_I830(dev_priv)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
		init_unused_ring(dev_priv, SRB2_BASE);
		init_unused_ring(dev_priv, SRB3_BASE);
	} else if (IS_GEN2(dev_priv)) {
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
	} else if (IS_GEN3(dev_priv)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, PRB2_BASE);
4815 4816 4817
	}
}

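/* (Re)initialise the hardware state of every engine, stopping at the first failure. */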
static int __i915_gem_restart_engines(void *data)
4819
{
4820
	struct drm_i915_private *i915 = data;
4821
	struct intel_engine_cs *engine;
4822
	enum intel_engine_id id;
	int err;

	for_each_engine(engine, i915, id) {
		err = engine->init_hw(engine);
		if (err)
			return err;
	}

	return 0;
}

int i915_gem_init_hw(struct drm_i915_private *dev_priv)
{
	int ret;
4837

4838 4839
	dev_priv->gt.last_init_time = ktime_get();

4840 4841 4842
	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

4843
	if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
4844
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4845

4846
	if (IS_HASWELL(dev_priv))
4847
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
4848
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4849

4850
	if (HAS_PCH_NOP(dev_priv)) {
4851
		if (IS_IVYBRIDGE(dev_priv)) {
4852 4853 4854
			u32 temp = I915_READ(GEN7_MSG_CTL);
			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
			I915_WRITE(GEN7_MSG_CTL, temp);
4855
		} else if (INTEL_GEN(dev_priv) >= 7) {
4856 4857 4858 4859
			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
		}
4860 4861
	}

4862
	i915_gem_init_swizzling(dev_priv);
4863

4864 4865 4866 4867 4868 4869
	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Makes sure all unused rings
	 * are totally idle.
	 */
4870
	init_unused_rings(dev_priv);
4871

4872
	BUG_ON(!dev_priv->kernel_context);
4873 4874 4875 4876
	if (i915_terminally_wedged(&dev_priv->gpu_error)) {
		ret = -EIO;
		goto out;
	}
4877

4878
	ret = i915_ppgtt_init_hw(dev_priv);
4879 4880 4881 4882 4883 4884
	if (ret) {
		DRM_ERROR("PPGTT enable HW failed %d\n", ret);
		goto out;
	}

	/* Need to do basic initialisation of all rings first: */
4885 4886 4887
	ret = __i915_gem_restart_engines(dev_priv);
	if (ret)
		goto out;
4888

4889
	intel_mocs_init_l3cc_table(dev_priv);
4890

4891 4892 4893 4894
	/* We can't enable contexts until all firmware is loaded */
	ret = intel_uc_init_hw(dev_priv);
	if (ret)
		goto out;
4895

4896 4897
out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4898
	return ret;
4899 4900
}

4901 4902 4903 4904 4905 4906
bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
{
	if (INTEL_INFO(dev_priv)->gen < 6)
		return false;

	/* TODO: make semaphores and Execlists play nicely together */
4907
	if (i915_modparams.enable_execlists)
4908 4909 4910 4911 4912 4913
		return false;

	if (value >= 0)
		return value;

	/* Enable semaphores on SNB when IO remapping is off */
4914
	if (IS_GEN6(dev_priv) && intel_vtd_active())
4915 4916 4917 4918 4919
		return false;

	return true;
}

4920
int i915_gem_init(struct drm_i915_private *dev_priv)
4921 4922 4923
{
	int ret;

4924
	mutex_lock(&dev_priv->drm.struct_mutex);
4925

4926 4927 4928 4929 4930 4931 4932 4933 4934
	/*
	 * We need to fall back to 4K pages since gvt gtt handling doesn't
	 * support huge page entries - we will need to check either hypervisor
	 * mm can support huge guest page or just do emulation in gvt.
	 */
	if (intel_vgpu_active(dev_priv))
		mkwrite_device_info(dev_priv)->page_sizes =
			I915_GTT_PAGE_SIZE_4K;

4935
	dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
4936

4937
	if (!i915_modparams.enable_execlists) {
4938
		dev_priv->gt.resume = intel_legacy_submission_resume;
4939
		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
4940
	} else {
4941
		dev_priv->gt.resume = intel_lr_context_resume;
4942
		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
4943 4944
	}

4945 4946 4947 4948 4949 4950 4951 4952
	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

4953 4954 4955
	ret = i915_gem_init_userptr(dev_priv);
	if (ret)
		goto out_unlock;
4956 4957 4958 4959

	ret = i915_gem_init_ggtt(dev_priv);
	if (ret)
		goto out_unlock;
4960

4961
	ret = i915_gem_contexts_init(dev_priv);
4962 4963
	if (ret)
		goto out_unlock;
4964

4965
	ret = intel_engines_init(dev_priv);
	if (ret)
4967
		goto out_unlock;
4968

4969
	ret = i915_gem_init_hw(dev_priv);
4970
	if (ret == -EIO) {
4971
		/* Allow engine initialisation to fail by marking the GPU as
4972 4973 4974
		 * wedged. But we only want to do this where the GPU is angry,
		 * for all other failure, such as an allocation failure, bail.
		 */
4975 4976 4977 4978
		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
			DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
			i915_gem_set_wedged(dev_priv);
		}
4979
		ret = 0;
4980
	}
4981 4982

out_unlock:
4983
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4984
	mutex_unlock(&dev_priv->drm.struct_mutex);
4985

4986
	return ret;
4987 4988
}

4989 4990 4991 4992 4993
void i915_gem_init_mmio(struct drm_i915_private *i915)
{
	i915_gem_sanitize(i915);
}

4994
void
4995
i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
4996
{
4997
	struct intel_engine_cs *engine;
4998
	enum intel_engine_id id;
4999

5000
	for_each_engine(engine, dev_priv, id)
5001
		dev_priv->gt.cleanup_engine(engine);
5002 5003
}

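/*
 * Work out how many fence registers this platform (or the host, when
 * running as a vGPU guest) provides and reset them to an unused state.
 */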
void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
5007
	int i;
5008 5009 5010 5011

	if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv))
		dev_priv->num_fence_regs = 32;
5012 5013 5014
	else if (INTEL_INFO(dev_priv)->gen >= 4 ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
5015 5016 5017 5018
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

5019
	if (intel_vgpu_active(dev_priv))
5020 5021 5022 5023
		dev_priv->num_fence_regs =
				I915_READ(vgtif_reg(avail_rs.fence_num));

	/* Initialize fence registers to zero */
5024 5025 5026 5027 5028 5029 5030
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];

		fence->i915 = dev_priv;
		fence->id = i;
		list_add_tail(&fence->link, &dev_priv->mm.fence_list);
	}
5031
	i915_gem_restore_fences(dev_priv);
5032

5033
	i915_gem_detect_bit_6_swizzle(dev_priv);
5034 5035
}

5036
int
5037
i915_gem_load_init(struct drm_i915_private *dev_priv)
5038
{
5039
	int err = -ENOMEM;
5040

5041 5042
	dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
	if (!dev_priv->objects)
5043 5044
		goto err_out;

5045 5046
	dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!dev_priv->vmas)
5047 5048
		goto err_objects;

5049 5050 5051 5052
	dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0);
	if (!dev_priv->luts)
		goto err_vmas;

5053 5054 5055
	dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
					SLAB_HWCACHE_ALIGN |
					SLAB_RECLAIM_ACCOUNT |
5056
					SLAB_TYPESAFE_BY_RCU);
5057
	if (!dev_priv->requests)
5058
		goto err_luts;
5059

5060 5061 5062 5063 5064 5065
	dev_priv->dependencies = KMEM_CACHE(i915_dependency,
					    SLAB_HWCACHE_ALIGN |
					    SLAB_RECLAIM_ACCOUNT);
	if (!dev_priv->dependencies)
		goto err_requests;

5066 5067 5068 5069
	dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
	if (!dev_priv->priorities)
		goto err_dependencies;

5070 5071
	mutex_lock(&dev_priv->drm.struct_mutex);
	INIT_LIST_HEAD(&dev_priv->gt.timelines);
5072
	err = i915_gem_timeline_init__global(dev_priv);
5073 5074
	mutex_unlock(&dev_priv->drm.struct_mutex);
	if (err)
5075
		goto err_priorities;
5076

5077
	INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
5078 5079

	spin_lock_init(&dev_priv->mm.obj_lock);
5080
	spin_lock_init(&dev_priv->mm.free_lock);
5081
	init_llist_head(&dev_priv->mm.free_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
5084
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
5085
	INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
5086

5087
	INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
5088
			  i915_gem_retire_work_handler);
5089
	INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
5090
			  i915_gem_idle_work_handler);
5091
	init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
5092
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
5093

5094 5095
	atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);

5096
	spin_lock_init(&dev_priv->fb_tracking.lock);
5097

	err = i915_gemfs_init(dev_priv);
	if (err)
		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);

5102 5103
	return 0;

5104 5105
err_priorities:
	kmem_cache_destroy(dev_priv->priorities);
5106 5107
err_dependencies:
	kmem_cache_destroy(dev_priv->dependencies);
5108 5109
err_requests:
	kmem_cache_destroy(dev_priv->requests);
5110 5111
err_luts:
	kmem_cache_destroy(dev_priv->luts);
5112 5113 5114 5115 5116 5117
err_vmas:
	kmem_cache_destroy(dev_priv->vmas);
err_objects:
	kmem_cache_destroy(dev_priv->objects);
err_out:
	return err;
5118
}
5119

5120
void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
5121
{
5122
	i915_gem_drain_freed_objects(dev_priv);
5123
	WARN_ON(!llist_empty(&dev_priv->mm.free_list));
5124
	WARN_ON(dev_priv->mm.object_count);
5125

5126 5127 5128 5129 5130
	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
	WARN_ON(!list_empty(&dev_priv->gt.timelines));
	mutex_unlock(&dev_priv->drm.struct_mutex);

5131
	kmem_cache_destroy(dev_priv->priorities);
5132
	kmem_cache_destroy(dev_priv->dependencies);
5133
	kmem_cache_destroy(dev_priv->requests);
5134
	kmem_cache_destroy(dev_priv->luts);
5135 5136
	kmem_cache_destroy(dev_priv->vmas);
	kmem_cache_destroy(dev_priv->objects);
5137 5138 5139

	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
	rcu_barrier();

	i915_gemfs_fini(dev_priv);
5142 5143
}

5144 5145
int i915_gem_freeze(struct drm_i915_private *dev_priv)
{
5146 5147 5148
	/* Discard all purgeable objects, let userspace recover those as
	 * required after resuming.
	 */
5149 5150 5151 5152 5153
	i915_gem_shrink_all(dev_priv);

	return 0;
}

5154 5155 5156
int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
5157 5158 5159 5160 5161
	struct list_head *phases[] = {
		&dev_priv->mm.unbound_list,
		&dev_priv->mm.bound_list,
		NULL
	}, **p;
5162 5163 5164 5165 5166 5167 5168 5169 5170 5171

	/* Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
5172 5173
	 *
	 * To try and reduce the hibernation image, we manually shrink
5174
	 * the objects as well, see i915_gem_freeze()
5175 5176
	 */

5177
	i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
5178
	i915_gem_drain_freed_objects(dev_priv);
5179

5180
	spin_lock(&dev_priv->mm.obj_lock);
5181
	for (p = phases; *p; p++) {
5182
		list_for_each_entry(obj, *p, mm.link)
5183
			__start_cpu_write(obj);
5184
	}
5185
	spin_unlock(&dev_priv->mm.obj_lock);
5186 5187 5188 5189

	return 0;
}

5190
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
5191
{
5192
	struct drm_i915_file_private *file_priv = file->driver_priv;
5193
	struct drm_i915_gem_request *request;
5194 5195 5196 5197 5198

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
5199
	spin_lock(&file_priv->mm.lock);
5200
	list_for_each_entry(request, &file_priv->mm.request_list, client_link)
5201
		request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
}

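/*
 * Per-client open hook: allocate the drm_i915_file_private that tracks the
 * client's outstanding requests and BSD engine selection, then open the
 * client's default GEM context.
 */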
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = i915;
	file_priv->file = file;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	file_priv->bsd_engine = -1;

	ret = i915_gem_context_open(i915, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	/* Control of individual bits within the mask is guarded by
	 * the owning plane->mutex, i.e. we can never see concurrent
	 * manipulation of individual bits. But since the bitfield as a whole
	 * is updated using RMW, we need to use atomics in order to update
	 * the bits.
	 */
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
		     sizeof(atomic_t) * BITS_PER_BYTE);

	if (old) {
		WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
		atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
	}

	if (new) {
		WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
		atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
	}
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
			         const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct file *file;
	size_t offset;
	int err;

	obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);

	file = obj->base.filp;
	offset = 0;
	do {
		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
		struct page *page;
		void *pgdata, *vaddr;

		err = pagecache_write_begin(file, file->f_mapping,
					    offset, len, 0,
					    &page, &pgdata);
		if (err < 0)
			goto fail;

		vaddr = kmap(page);
		memcpy(vaddr, data, len);
		kunmap(page);

		err = pagecache_write_end(file, file->f_mapping,
					  offset, len, len,
					  page, pgdata);
		if (err < 0)
			goto fail;

		size -= len;
		data += len;
		offset += len;
	} while (size);

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

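/*
 * Look up the scatterlist entry that backs page index @n of the object and
 * return it along with the page offset into that entry. The caller must
 * hold a pin on the object's pages.
 */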
struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookups of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = __sg_page_count(sg);

	while (idx + count <= n) {
		unsigned long exception, i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		exception =
			RADIX_TREE_EXCEPTIONAL_ENTRY |
			idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i,
						(void *)exception);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of a multi-page sg entry,
	 * the radixtree will contain an exceptional entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(radix_tree_exception(sg))) {
		unsigned long base =
			(unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

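/* Return the struct page backing page index @n of the object */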
struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

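/* Return the DMA address of page index @n of the object's backing storage */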
dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

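/*
 * Swap a shmemfs-backed object over to the contiguous "phys" backing
 * (i915_gem_phys_ops): the object must be unbound, unpinned, unmapped and
 * marked WILLNEED; its new physical pages are then pinned until release.
 */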
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	struct sg_table *pages;
	int err;

	if (align > obj->base.size)
		return -EINVAL;

	if (obj->ops == &i915_gem_phys_ops)
		return 0;

	if (obj->ops != &i915_gem_object_ops)
		return -EINVAL;

	err = i915_gem_object_unbind(obj);
	if (err)
		return err;

	mutex_lock(&obj->mm.lock);

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.quirked) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.mapping) {
		err = -EBUSY;
		goto err_unlock;
	}

	pages = fetch_and_zero(&obj->mm.pages);
	if (pages) {
		struct drm_i915_private *i915 = to_i915(obj->base.dev);

		__i915_gem_object_reset_page_iter(obj);

		spin_lock(&i915->mm.obj_lock);
		list_del(&obj->mm.link);
		spin_unlock(&i915->mm.obj_lock);
	}

	obj->ops = &i915_gem_phys_ops;

	err = ____i915_gem_object_get_pages(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	if (!IS_ERR_OR_NULL(pages))
		i915_gem_object_ops.put_pages(obj, pages);
	mutex_unlock(&obj->mm.lock);
	return 0;

err_xfer:
	obj->ops = &i915_gem_object_ops;
	obj->mm.pages = pages;
err_unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/scatterlist.c"
#include "selftests/mock_gem_device.c"
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"
#include "selftests/i915_gem_object.c"
#include "selftests/i915_gem_coherency.c"
#endif